-rw-r--r--  .coveragerc | 8
-rw-r--r--  .dockerignore | 8
-rw-r--r--  .flake8 | 5
-rw-r--r--  .pylintrc | 2
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  .travis.yml | 12
-rw-r--r--  CONTRIBUTING.md | 77
-rw-r--r--  Dockerfile | 47
-rw-r--r--  Dockerfile.rhel7 | 26
-rw-r--r--  README.md | 4
-rw-r--r--  README_CONTAINER_IMAGE.md | 41
-rw-r--r--  filter_plugins/oo_filters.py | 38
-rwxr-xr-x  hack/build-images.sh | 87
-rwxr-xr-x  hack/push-release.sh | 55
-rw-r--r--  inventory/byo/hosts.origin.example | 69
-rw-r--r--  inventory/byo/hosts.ose.example | 67
-rw-r--r--  openshift-ansible.spec | 545
-rw-r--r--  playbooks/adhoc/contiv/delete_contiv.yml | 29
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/README.md | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml | 8
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/disable_excluder.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/ca.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/router.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/reset_excluder.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluder.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 28
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml | 67
-rw-r--r--  playbooks/common/openshift-master/config.yml | 2
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 11
-rw-r--r--  playbooks/common/openshift-node/config.yml | 3
-rw-r--r--  playbooks/common/openshift-node/scaleup.yml | 11
-rw-r--r--  pytest.ini | 16
-rw-r--r--  requirements.txt | 7
-rw-r--r--  roles/contiv/README.md | 39
-rw-r--r--  roles/contiv/contiv-openshift-vlan-network.png | bin 0 -> 42664 bytes
-rw-r--r--  roles/contiv/defaults/main.yml | 106
-rw-r--r--  roles/contiv/files/contiv_cni.conf | 5
-rw-r--r--  roles/contiv/handlers/main.yml | 18
-rw-r--r--  roles/contiv/meta/main.yml | 28
-rw-r--r--  roles/contiv/tasks/aci.yml | 32
-rw-r--r--  roles/contiv/tasks/default_network.yml | 15
-rw-r--r--  roles/contiv/tasks/download_bins.yml | 46
-rw-r--r--  roles/contiv/tasks/main.yml | 14
-rw-r--r--  roles/contiv/tasks/netmaster.yml | 65
-rw-r--r--  roles/contiv/tasks/netmaster_firewalld.yml | 16
-rw-r--r--  roles/contiv/tasks/netmaster_iptables.yml | 21
-rw-r--r--  roles/contiv/tasks/netplugin.yml | 122
-rw-r--r--  roles/contiv/tasks/netplugin_firewalld.yml | 34
-rw-r--r--  roles/contiv/tasks/netplugin_iptables.yml | 29
-rw-r--r--  roles/contiv/tasks/ovs.yml | 28
-rw-r--r--  roles/contiv/tasks/packageManagerInstall.yml | 12
-rw-r--r--  roles/contiv/tasks/pkgMgrInstallers/centos-install.yml | 33
-rw-r--r--  roles/contiv/templates/aci-gw.service | 10
-rw-r--r--  roles/contiv/templates/aci_gw.j2 | 35
-rw-r--r--  roles/contiv/templates/contiv.cfg.j2 | 6
-rw-r--r--  roles/contiv/templates/netmaster.env.j2 | 2
-rw-r--r--  roles/contiv/templates/netmaster.service | 8
-rw-r--r--  roles/contiv/templates/netplugin.j2 | 9
-rw-r--r--  roles/contiv/templates/netplugin.service | 8
-rw-r--r--  roles/contiv_facts/defaults/main.yaml | 10
-rw-r--r--  roles/contiv_facts/handlers/main.yml | 3
-rw-r--r--  roles/contiv_facts/tasks/fedora-install.yml | 24
-rw-r--r--  roles/contiv_facts/tasks/main.yml | 88
-rw-r--r--  roles/contiv_facts/tasks/rpm.yml | 24
-rw-r--r--  roles/docker/templates/custom.conf.j2 | 2
-rw-r--r--  roles/etcd/defaults/main.yaml | 1
-rw-r--r--  roles/etcd/tasks/etcdctl.yml | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 53
-rw-r--r--  roles/etcd/tasks/system_container.yml | 72
-rw-r--r--  roles/etcd/templates/custom.conf.j2 | 3
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 12
-rw-r--r--  roles/etcd_common/defaults/main.yml | 2
-rw-r--r--  roles/etcd_server_certificates/meta/main.yml | 2
-rw-r--r--  roles/etcd_server_certificates/tasks/main.yml | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 33
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py (renamed from roles/lib_openshift/library/oadm_manage_node.py) | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 2128
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 2122
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 126
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 138
-rw-r--r--  roles/lib_openshift/library/oc_atomic_container.py | 203
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 30
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 42
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 30
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 30
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 1423
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 30
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 1676
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 36
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 42
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 30
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 41
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 30
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 30
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 30
-rw-r--r--  roles/lib_openshift/meta/main.yml | 3
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_manage_node.py (renamed from roles/lib_openshift/src/ansible/oadm_manage_node.py) | 0
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_policy_group.py | 34
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_policy_user.py | 34
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_registry.py | 4
-rw-r--r--  roles/lib_openshift/src/ansible/oc_atomic_container.py | 137
-rw-r--r--  roles/lib_openshift/src/ansible/oc_objectvalidator.py | 24
-rw-r--r--  roles/lib_openshift/src/ansible/oc_project.py | 33
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_ca_server_cert.py | 3
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_manage_node.py (renamed from roles/lib_openshift/src/class/oadm_manage_node.py) | 0
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_policy_group.py | 223
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_policy_user.py | 217
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_registry.py | 66
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_router.py | 93
-rw-r--r--  roles/lib_openshift/src/class/oc_objectvalidator.py | 86
-rw-r--r--  roles/lib_openshift/src/class/oc_project.py | 189
-rw-r--r--  roles/lib_openshift/src/class/oc_route.py | 6
-rw-r--r--  roles/lib_openshift/src/class/oc_service.py | 6
-rw-r--r--  roles/lib_openshift/src/doc/atomic_container | 36
-rw-r--r--  roles/lib_openshift/src/doc/manage_node | 6
-rw-r--r--  roles/lib_openshift/src/doc/objectvalidator | 27
-rw-r--r--  roles/lib_openshift/src/doc/policy_group | 74
-rw-r--r--  roles/lib_openshift/src/doc/policy_user | 74
-rw-r--r--  roles/lib_openshift/src/doc/project | 81
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 19
-rw-r--r--  roles/lib_openshift/src/lib/deploymentconfig.py | 12
-rw-r--r--  roles/lib_openshift/src/lib/project.py | 85
-rw-r--r--  roles/lib_openshift/src/lib/scc.py | 218
-rw-r--r--  roles/lib_openshift/src/lib/service.py | 5
-rw-r--r--  roles/lib_openshift/src/lib/volume.py | 9
-rw-r--r--  roles/lib_openshift/src/sources.yml | 57
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_adm_manage_node.yml (renamed from roles/lib_openshift/src/test/integration/oadm_manage_node.yml) | 8
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_project.yml | 83
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_manage_node.py (renamed from roles/lib_openshift/src/test/unit/test_oadm_manage_node.py) | 34
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_registry.py | 369
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_router.py | 474
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_env.py | 26
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_label.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py | 903
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_process.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_project.py | 280
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_route.py | 28
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_scale.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_secret.py | 24
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_service.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py | 26
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py | 34
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_version.py | 20
-rw-r--r--  roles/lib_openshift/tasks/main.yml | 10
-rw-r--r--  roles/lib_utils/library/yedit.py | 11
-rw-r--r--  roles/lib_utils/meta/main.yml | 3
-rw-r--r--  roles/lib_utils/src/class/yedit.py | 11
-rwxr-xr-x  roles/lib_utils/src/test/unit/test_repoquery.py | 20
-rwxr-xr-x  roles/lib_utils/src/test/unit/test_yedit.py | 33
-rw-r--r--  roles/lib_utils/tasks/main.yml | 10
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 9
-rw-r--r--  roles/nuage_master/tasks/serviceaccount.yml | 14
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 1
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 119
-rw-r--r--  roles/openshift_certificate_expiry/test/conftest.py | 116
-rw-r--r--  roles/openshift_certificate_expiry/test/master.server.crt | 42
-rw-r--r--  roles/openshift_certificate_expiry/test/master.server.crt.txt | 82
-rw-r--r--  roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt | 19
-rw-r--r--  roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt.txt | 75
-rw-r--r--  roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py | 121
-rw-r--r--  roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py | 67
-rw-r--r--  roles/openshift_common/tasks/main.yml | 13
-rw-r--r--  roles/openshift_etcd_ca/meta/main.yml (renamed from roles/openshift_serviceaccounts/meta/main.yml) | 11
-rw-r--r--  roles/openshift_etcd_ca/tasks/main.yml | 1
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 15
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json | 24
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-amq-template.json | 362
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-log-template.json | 336
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-rest-sql-template.json | 421
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-cxf-rest-template.json | 385
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/openjdk18-web-basic-s2i.json | 267
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-amq-template.json | 331
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-config-template.json | 327
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-drools-template.json | 334
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-infinispan-template.json | 315
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-rest-sql-template.json | 403
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-teiid-template.json | 343
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-xml-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxrs-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxws-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-amq-template.json | 362
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-log-template.json | 336
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-rest-sql-template.json | 421
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-cxf-rest-template.json | 385
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-amq-template.json | 331
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-config-template.json | 327
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-drools-template.json | 334
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-infinispan-template.json | 315
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-rest-sql-template.json | 403
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-teiid-template.json | 343
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-xml-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxrs-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxws-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-amq-template.json | 362
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-log-template.json | 336
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-rest-sql-template.json | 421
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-cxf-rest-template.json | 385
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-amq-template.json | 331
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-config-template.json | 327
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-drools-template.json | 334
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-infinispan-template.json | 315
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-rest-sql-template.json | 403
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-teiid-template.json | 343
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-xml-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxrs-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxws-template.json | 305
-rw-r--r--  roles/openshift_excluder/README.md | 17
-rw-r--r--  roles/openshift_excluder/defaults/main.yml | 6
-rw-r--r--  roles/openshift_excluder/meta/main.yml | 1
-rw-r--r--  roles/openshift_excluder/tasks/adjust.yml | 25
-rw-r--r--  roles/openshift_excluder/tasks/disable.yml | 22
-rw-r--r--  roles/openshift_excluder/tasks/enable.yml | 21
-rw-r--r--  roles/openshift_excluder/tasks/exclude.yml | 27
-rw-r--r--  roles/openshift_excluder/tasks/init.yml | 12
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 31
-rw-r--r--  roles/openshift_excluder/tasks/main.yml | 2
-rw-r--r--  roles/openshift_excluder/tasks/reset.yml | 12
-rw-r--r--  roles/openshift_excluder/tasks/status.yml | 56
-rw-r--r--  roles/openshift_excluder/tasks/unexclude.yml | 23
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 34
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 28
-rw-r--r--  roles/openshift_facts/vars/main.yml | 5
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 12
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 28
-rw-r--r--  roles/openshift_hosted/filter_plugins/filters.py | 42
-rw-r--r--  roles/openshift_hosted/handlers/main.yml | 0
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 15
-rw-r--r--  roles/openshift_hosted/tasks/main.yml | 19
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 161
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml | 177
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml | 123
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml | 26
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/s3.yml | 74
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 162
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 35
-rw-r--r--  roles/openshift_logging/files/fluent.conf | 2
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 22
-rw-r--r--  roles/openshift_logging/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging/tasks/generate_pvcs.yaml | 28
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml | 8
-rw-r--r--  roles/openshift_logging/tasks/install_curator.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 45
-rw-r--r--  roles/openshift_logging/tasks/install_kibana.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 4
-rw-r--r--  roles/openshift_logging/templates/elasticsearch.yml.j2 | 2
-rw-r--r--  roles/openshift_logging/templates/es-storage-emptydir.partial | 1
-rw-r--r--  roles/openshift_logging/templates/es-storage-hostpath.partial | 2
-rw-r--r--  roles/openshift_logging/templates/es-storage-pvc.partial | 2
-rw-r--r--  roles/openshift_logging/templates/es.j2 | 7
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2 | 2
-rw-r--r--  roles/openshift_logging/templates/pvc.j2 | 2
-rw-r--r--  roles/openshift_logging/templates/secret.j2 | 4
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master/meta/main.yml | 5
-rw-r--r--  roles/openshift_master/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 79
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 20
-rw-r--r--  roles/openshift_master/vars/main.yml | 1
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 8
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py | 20
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py | 18
-rw-r--r--  roles/openshift_master_facts/test/conftest.py | 2
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py | 16
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py | 16
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 7
-rwxr-xr-x  roles/openshift_metrics/files/import_jks_certs.sh | 19
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 26
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 12
-rw-r--r--  roles/openshift_metrics/tasks/import_jks_certs.yaml | 11
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 13
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/update_master_config.yaml | 9
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 13
-rw-r--r--  roles/openshift_metrics/templates/pvc.j2 | 2
-rw-r--r--  roles/openshift_metrics/vars/main.yaml | 1
-rw-r--r--  roles/openshift_node/tasks/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 38
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 38
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 11
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 4
-rw-r--r--  roles/openshift_node/vars/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 2
-rw-r--r--  roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo | 7
-rw-r--r--  roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml | 38
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml | 28
-rw-r--r--  roles/openshift_serviceaccounts/templates/serviceaccount.j2 | 4
-rw-r--r--  setup.cfg | 27
-rw-r--r--  test-requirements.txt | 4
-rw-r--r--  tox.ini | 11
-rw-r--r--  utils/.coveragerc | 18
l---------  utils/.pylintrc | 1
-rw-r--r--  utils/Makefile | 110
-rw-r--r--  utils/README.md | 61
-rw-r--r--  utils/setup.cfg | 27
-rw-r--r--  utils/setup.py | 11
-rw-r--r--  utils/src/ooinstall/ansible_plugins/facts_callback.py | 7
-rw-r--r--  utils/test-requirements.txt | 15
-rw-r--r--  utils/test/cli_installer_tests.py | 102
-rw-r--r--  utils/test/fixture.py | 13
-rw-r--r--  utils/test/oo_config_tests.py | 39
-rw-r--r--  utils/test/openshift_ansible_tests.py | 26
-rw-r--r--  utils/test/test_utils.py | 8
-rw-r--r--  utils/tox.ini | 19
322 files changed, 30222 insertions, 2523 deletions
diff --git a/.coveragerc b/.coveragerc
index 1e819e157..ad7893b91 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -4,12 +4,14 @@ omit =
*/lib/python*/site-packages/*
*/lib/python*/*
/usr/*
- setup.py
+ */setup.py
# TODO(rhcarvalho): this is used to ignore test files from coverage report.
# We can make this less generic when we stick with a single test pattern in
# the repo.
- test_*.py
- *_tests.py
+ */conftest.py
+ */test_*.py
+ */*_tests.py
+ */test/*
[report]
fail_under = 28
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..968811df5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,8 @@
+.*
+bin
+docs
+test
+utils
+**/*.md
+*.spec
+setup*
diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000..99ae3c2f0
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,5 @@
+[flake8]
+# TODO: cleanup flake8 issues with utils/test/*
+exclude=.tox,inventory,utils/test
+max_line_length = 120
+ignore = E501,T003
diff --git a/.pylintrc b/.pylintrc
index fd6c6d0bd..e85987de3 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -60,7 +60,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-#disable=
+disable=fixme,locally-disabled,file-ignored,duplicate-code
[REPORTS]
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 3b7826d31..339add87b 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.5.3-1 ./
+3.6.3-1 ./
diff --git a/.travis.yml b/.travis.yml
index f0a228c23..245202139 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,17 +4,19 @@ sudo: false
cache:
- pip
+before_cache:
+ - rm ~/.cache/pip/log/debug.log
+
language: python
python:
- "2.7"
- "3.5"
install:
- - pip install -r requirements.txt
- - pip install tox-travis
+ - pip install tox-travis coveralls
script:
- # TODO(rhcarvalho): check syntax of other important entrypoint playbooks
- - ansible-playbook --syntax-check playbooks/byo/config.yml
- tox
- - cd utils && tox
+
+after_success:
+ - coveralls
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 502ef6aa5..50bb09470 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -72,13 +72,13 @@ See the [RPM build instructions](BUILD.md).
## Running tests
-This section covers how to run tests for the root of this repo, running tests
-for the oo-install wrapper is described in [utils/README.md](utils/README.md).
-
We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
tests. Alternatively, tests can be run using
[detox](https://pypi.python.org/pypi/detox/) which allows for running tests in
-parallel
+parallel.
+
+Note: while `detox` may be useful in development to make use of multiple cores,
+it can be buggy at times and produce flaky results, so we do not use it in our CI.
```
@@ -98,43 +98,90 @@ by `tox`:
$ find . -path '*/bin/python' | grep -vF .tox
```
-Extraneous virtualenvs cause tools such as `pylint` to take a very long time
-going through files that are part of the virtualenv.
+The reason for this recommendation is that extraneous virtualenvs cause tools
+such as `pylint` to take a very long time going through files that are part of
+the virtualenv, and test discovery to go through lots of irrelevant files and
+potentially fail.
---
List the test environments available:
+
```
tox -l
```
-Run all of the tests with:
+Run all of the tests and linters with:
+
```
tox
```
-Run all of the tests in parallel with detox:
+Run all of the tests and linters in parallel (may flake):
+
```
detox
```
-Running a particular test environment (python 2.7 flake8 tests in this case):
+### Run only unit tests or some specific linter
+
+Run a particular test environment (`flake8` on Python 2.7 in this case):
+
```
-tox -e py27-ansible22-flake8
+tox -e py27-flake8
```
-Running a particular test environment in a clean virtualenv (python 3.5 pylint
-tests in this case):
+Run a particular test environment in a clean virtualenv (`pylint` on Python 3.5
+in this case):
+
```
-tox -r -e py35-ansible22-pylint
+tox -re py35-pylint
```
-If you want to enter the virtualenv created by tox to do additional
+### Tricks
+
+#### Activating a virtualenv managed by tox
+
+If you want to enter a virtualenv created by tox to do additional
testing/debugging (py27-flake8 env in this case):
+
```
-source .tox/py27-ansible22-flake8/bin/activate
+source .tox/py27-flake8/bin/activate
```
+#### Limiting the unit tests that are run
+
+During development, it might be useful to constantly run just a single test file
+or test method, or to pass custom arguments to `pytest`:
+
+```
+tox -e py27-unit -- path/to/test/file.py
+```
+
+Anything after `--` is passed directly to `pytest`. To learn more about what
+other flags you can use, try:
+
+```
+tox -e py27-unit -- -h
+```
+
+As a practical example, the snippet below shows how to list all tests in a
+certain file, and then execute only one test of interest:
+
+```
+$ tox -e py27-unit -- roles/lib_openshift/src/test/unit/test_oc_project.py --collect-only --no-cov
+...
+collected 1 items
+<Module 'roles/lib_openshift/src/test/unit/test_oc_project.py'>
+ <UnitTestCase 'OCProjectTest'>
+ <TestCaseFunction 'test_adding_a_project'>
+...
+$ tox -e py27-unit -- roles/lib_openshift/src/test/unit/test_oc_project.py -k test_adding_a_project
+```
+
+Among other things, this can be used, for instance, to see the coverage levels
+of individual modules as we work on improving tests.
+
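+As a minimal sketch (assuming the `py27-unit` environment uses `pytest-cov`,
+whose standard `--cov` and `--cov-report` flags are shown here), per-module
+coverage while iterating on a single test file could be inspected with:
+
+```
+tox -e py27-unit -- roles/lib_openshift/src/test/unit/test_oc_project.py \
+  --cov=roles/lib_openshift --cov-report=term-missing
+```
+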
## Submitting contributions
1. Go through the guides from the [introduction](#Introduction).
diff --git a/Dockerfile b/Dockerfile
index f3d45837a..c6593491d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,26 +1,33 @@
-FROM rhel7
+# Using playbook2image as a base
+# See https://github.com/aweiteka/playbook2image for details on the image
+# including documentation for the settings/env vars referenced below
+FROM docker.io/aweiteka/playbook2image:latest
-MAINTAINER Troy Dawson <tdawson@redhat.com>
+MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
-LABEL Name="openshift3/installer"
-LABEL Vendor="Red Hat" License=GPLv2+
-LABEL Version="v3.1.1.901"
-LABEL Release="6"
-LABEL BZComponent="aos3-installation-docker"
-LABEL Architecture="x86_64"
-LABEL io.k8s.description="Ansible code and playbooks for installing Openshift Container Platform." \
- io.k8s.display-name="Openshift Installer" \
- io.openshift.tags="openshift,installer"
+LABEL name="openshift-ansible" \
+ summary="OpenShift's installation and configuration tool" \
+ description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ url="https://github.com/openshift/openshift-ansible" \
+ io.k8s.display-name="openshift-ansible" \
+ io.k8s.description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ io.openshift.expose-services="" \
+ io.openshift.tags="openshift,install,upgrade,ansible"
-RUN INSTALL_PKGS="atomic-openshift-utils" && \
- yum install -y --enablerepo=rhel-7-server-ose-3.2-rpms $INSTALL_PKGS && \
- rpm -V $INSTALL_PKGS && \
- yum clean all
+# The playbook to be run is specified via the PLAYBOOK_FILE env var.
+# This sets a default of openshift_facts.yml as it's an informative playbook
+# that can help test that everything is set properly (inventory, sshkeys)
+ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+ OPTS="-v" \
+ INSTALL_OC=true
-# Expect user to mount a workdir for container output (installer.cfg, hosts inventory, ansible log)
-VOLUME /var/lib/openshift-installer/
-WORKDIR /var/lib/openshift-installer/
+# playbook2image's assemble script expects the source to be available in
+# /tmp/src (as per the source-to-image specs) so we import it there
+ADD . /tmp/src
-RUN mkdir -p /var/lib/openshift-installer/
+# Running the 'assemble' script provided by playbook2image will install
+# dependencies specified in requirements.txt and install the 'oc' client
+# as per the INSTALL_OC environment setting above
+RUN /usr/libexec/s2i/assemble
-ENTRYPOINT ["/usr/bin/atomic-openshift-installer", "-c", "/var/lib/openshift-installer/installer.cfg", "--ansible-log-path", "/var/lib/openshift-installer/ansible.log"]
+CMD [ "/usr/libexec/s2i/run" ]
diff --git a/Dockerfile.rhel7 b/Dockerfile.rhel7
new file mode 100644
index 000000000..f3d45837a
--- /dev/null
+++ b/Dockerfile.rhel7
@@ -0,0 +1,26 @@
+FROM rhel7
+
+MAINTAINER Troy Dawson <tdawson@redhat.com>
+
+LABEL Name="openshift3/installer"
+LABEL Vendor="Red Hat" License=GPLv2+
+LABEL Version="v3.1.1.901"
+LABEL Release="6"
+LABEL BZComponent="aos3-installation-docker"
+LABEL Architecture="x86_64"
+LABEL io.k8s.description="Ansible code and playbooks for installing Openshift Container Platform." \
+ io.k8s.display-name="Openshift Installer" \
+ io.openshift.tags="openshift,installer"
+
+RUN INSTALL_PKGS="atomic-openshift-utils" && \
+ yum install -y --enablerepo=rhel-7-server-ose-3.2-rpms $INSTALL_PKGS && \
+ rpm -V $INSTALL_PKGS && \
+ yum clean all
+
+# Expect user to mount a workdir for container output (installer.cfg, hosts inventory, ansible log)
+VOLUME /var/lib/openshift-installer/
+WORKDIR /var/lib/openshift-installer/
+
+RUN mkdir -p /var/lib/openshift-installer/
+
+ENTRYPOINT ["/usr/bin/atomic-openshift-installer", "-c", "/var/lib/openshift-installer/installer.cfg", "--ansible-log-path", "/var/lib/openshift-installer/ansible.log"]
diff --git a/README.md b/README.md
index c3c022e59..3ec6555e8 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,6 @@
[![Join the chat at https://gitter.im/openshift/openshift-ansible](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/openshift/openshift-ansible)
[![Build Status](https://travis-ci.org/openshift/openshift-ansible.svg?branch=master)](https://travis-ci.org/openshift/openshift-ansible)
+[![Coverage Status](https://coveralls.io/repos/github/openshift/openshift-ansible/badge.svg?branch=master)](https://coveralls.io/github/openshift/openshift-ansible?branch=master)
# OpenShift Ansible
@@ -74,6 +75,9 @@ you are not running a stable release.
- [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
- [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+## Containerized OpenShift Ansible
+
+See [README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md) for information on how to package openshift-ansible as a container image.
## Installer Hooks
diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md
new file mode 100644
index 000000000..35e057af3
--- /dev/null
+++ b/README_CONTAINER_IMAGE.md
@@ -0,0 +1,41 @@
+# Containerized openshift-ansible to run playbooks
+
+The [Dockerfile](Dockerfile) in this repository uses the [playbook2image](https://github.com/aweiteka/playbook2image) source-to-image base image to containerize `openshift-ansible`. The resulting image can run any of the provided playbooks.
+
+**Note**: at this time there are known issues that prevent running this image for installation/upgrade purposes (i.e. running one of the config/upgrade playbooks) from within one of the hosts that is also an installation target: if the playbook you run attempts to manage and restart the docker daemon (as install/upgrade playbooks do), it will kill the container itself during its operation.
+
+## Build
+
+To build a container image of `openshift-ansible`:
+
+1. Using standalone **Docker**:
+
+ cd openshift-ansible
+ docker build -t openshift/openshift-ansible .
+
+1. Using an **OpenShift** build:
+
+ oc new-build docker.io/aweiteka/playbook2image~https://github.com/openshift/openshift-ansible
+ oc describe imagestream openshift-ansible
+
+## Usage
+
+The `playbook2image` base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/aweiteka/playbook2image) documentation.
+
+At the very least, when running a container using an image built this way you must specify:
+
+1. An **inventory** file. This can be mounted inside the container as a volume and specified with the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it.
+1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh`
+1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. If you don't specify a playbook, the [`openshift_facts`](playbooks/byo/openshift_facts.yml) playbook will be run to collect and show facts about your OpenShift environment.
+
+Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](roles/openshift_certificate_expiry). The inventory and ssh keys are mounted as volumes (the latter requires setting the uid in the container via `-u` and the SELinux label on the key file via `:Z` so it can be accessed), and the `PLAYBOOK_FILE` environment variable is set to point to an example certificate check playbook that is already part of the image:
+
+ docker run -u `id -u` \
+ -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
+ -v /etc/ansible/hosts:/tmp/inventory \
+ -e INVENTORY_FILE=/tmp/inventory \
+ -e OPTS="-v" \
+ -e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \
+ openshift/openshift-ansible
+
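+Alternatively, instead of mounting the inventory as a volume, it can be fetched
+over HTTP via the `INVENTORY_URL` environment variable. A minimal sketch (the
+URL below is hypothetical; any web server reachable from the container will do):
+
+    docker run -u `id -u` \
+        -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
+        -e INVENTORY_URL=https://example.com/inventory/hosts \
+        -e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \
+        openshift/openshift-ansible
+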
+The [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples) provide additional information on how to use an image built from it like this one.
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index ed6923687..a619f9ccb 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1,26 +1,32 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
-# pylint: disable=no-name-in-module, import-error, wrong-import-order, ungrouped-imports
"""
Custom filters for use in openshift-ansible
"""
+import json
import os
import pdb
-import pkg_resources
-import re
-import json
-import yaml
import random
+import re
-from ansible import errors
from collections import Mapping
-from distutils.util import strtobool
-from distutils.version import LooseVersion
+# pylint no-name-in-module and import-error disabled here because pylint
+# fails to properly detect the packages when installed in a virtualenv
+from distutils.util import strtobool # pylint:disable=no-name-in-module,import-error
+from distutils.version import LooseVersion # pylint:disable=no-name-in-module,import-error
from operator import itemgetter
-from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+import pkg_resources
+import yaml
+
+from ansible import errors
+# pylint no-name-in-module and import-error disabled here because pylint
+# fails to properly detect the packages when installed in a virtualenv
+from ansible.compat.six import string_types # pylint:disable=no-name-in-module,import-error
+from ansible.compat.six.moves.urllib.parse import urlparse # pylint:disable=no-name-in-module,import-error
+from ansible.module_utils._text import to_text
from ansible.parsing.yaml.dumper import AnsibleDumper
-from six import string_types
HAS_OPENSSL = False
try:
@@ -29,15 +35,6 @@ try:
except ImportError:
pass
-try:
- # ansible-2.2
- # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2,
- # ansible.module_utils._text.to_text should be used instead.
- from ansible.module_utils._text import to_text
-except ImportError:
- # ansible-2.1
- from ansible.utils.unicode import to_unicode as to_text
-
def oo_pdb(arg):
""" This pops you into a pdb instance where arg is the data passed in
@@ -117,8 +114,7 @@ def oo_merge_hostvars(hostvars, variables, inventory_hostname):
raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
if not isinstance(inventory_hostname, string_types):
raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
- # pylint: disable=no-member
- ansible_version = pkg_resources.get_distribution("ansible").version
+ ansible_version = pkg_resources.get_distribution("ansible").version # pylint: disable=maybe-no-member
merged_hostvars = {}
if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
merged_hostvars = oo_merge_dicts(
diff --git a/hack/build-images.sh b/hack/build-images.sh
new file mode 100755
index 000000000..f6210e239
--- /dev/null
+++ b/hack/build-images.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+STARTTIME=$(date +%s)
+source_root=$(dirname "${0}")/..
+
+prefix="openshift/openshift-ansible"
+version="latest"
+verbose=false
+options=""
+help=false
+
+for args in "$@"
+do
+ case $args in
+ --prefix=*)
+ prefix="${args#*=}"
+ ;;
+ --version=*)
+ version="${args#*=}"
+ ;;
+ --no-cache)
+ options="${options} --no-cache"
+ ;;
+ --verbose)
+ verbose=true
+ ;;
+ --help)
+ help=true
+ ;;
+ esac
+done
+
+# allow ENV to take precedent over switches
+prefix="${PREFIX:-$prefix}"
+version="${OS_TAG:-$version}"
+
+if [ "$help" = true ]; then
+ echo "Builds the docker images for openshift-ansible"
+ echo
+ echo "Options: "
+ echo " --prefix=PREFIX"
+ echo " The prefix to use for the image names."
+ echo " default: openshift/openshift-ansible"
+ echo
+ echo " --version=VERSION"
+ echo " The version used to tag the image"
+ echo " default: latest"
+ echo
+ echo " --no-cache"
+ echo " If set will perform the build without a cache."
+ echo
+ echo " --verbose"
+ echo " Enables printing of the commands as they run."
+ echo
+ echo " --help"
+ echo " Prints this help message"
+ echo
+ exit 0
+fi
+
+if [ "$verbose" = true ]; then
+ set -x
+fi
+
+BUILD_STARTTIME=$(date +%s)
+comp_path=$source_root/
+docker_tag=${prefix}:${version}
+echo
+echo
+echo "--- Building component '$comp_path' with docker tag '$docker_tag' ---"
+docker build ${options} -t $docker_tag $comp_path
+BUILD_ENDTIME=$(date +%s); echo "--- $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---"
+echo
+echo
+
+echo
+echo
+echo "++ Active images"
+docker images | grep ${prefix} | grep ${version} | sort
+echo
+
+
+ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
diff --git a/hack/push-release.sh b/hack/push-release.sh
new file mode 100755
index 000000000..8639143af
--- /dev/null
+++ b/hack/push-release.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# This script pushes all of the built images to a registry.
+#
+# Set OS_PUSH_BASE_REGISTRY to prefix the destination images
+#
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+STARTTIME=$(date +%s)
+OS_ROOT=$(dirname "${BASH_SOURCE}")/..
+
+PREFIX="${PREFIX:-openshift/openshift-ansible}"
+
+# Go to the top of the tree.
+cd "${OS_ROOT}"
+
+# Allow a release to be repushed with a tag
+tag="${OS_PUSH_TAG:-}"
+if [[ -n "${tag}" ]]; then
+ tag=":${tag}"
+else
+ tag=":latest"
+fi
+
+# Source tag
+source_tag="${OS_TAG:-}"
+if [[ -z "${source_tag}" ]]; then
+ source_tag="latest"
+fi
+
+images=(
+ ${PREFIX}
+)
+
+PUSH_OPTS=""
+if docker push --help | grep -q force; then
+ PUSH_OPTS="--force"
+fi
+
+if [[ "${OS_PUSH_BASE_REGISTRY-}" != "" || "${tag}" != "" ]]; then
+ set -e
+ for image in "${images[@]}"; do
+ docker tag "${image}:${source_tag}" "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
+ done
+ set +e
+fi
+
+for image in "${images[@]}"; do
+ docker push ${PUSH_OPTS} "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
+done
+
+ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 7741730ad..20764fb95 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -30,17 +30,17 @@ deployment_type=origin
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v1.4
+openshift_release=v1.5
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v1.2.0
+#openshift_image_tag=v1.5.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-1.2.0
+#openshift_pkg_version=-1.5.0
# Install the openshift examples
#openshift_install_examples=true
@@ -89,6 +89,8 @@ openshift_release=v1.4
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
+# Specify exact version of etcd to configure or upgrade to.
+# etcd_version="3.1.0"
# Upgrade Hooks
#
@@ -263,6 +265,15 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Override master servingInfo.maxRequestsInFlight
#openshift_master_max_requests_inflight=500
+# Override master and node servingInfo.minTLSVersion and .cipherSuites
+# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
+# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
+#openshift_master_min_tls_version=VersionTLS12
+#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+#
+#openshift_node_min_tls_version=VersionTLS12
+#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
@@ -300,7 +311,51 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Disable management of the OpenShift Router
#openshift_hosted_manage_router=false
-
+#
+# Router sharding support has been added and can be achieved by supplying the correct
+# data to the inventory. The variable to house the data is openshift_hosted_routers
+# and is in the form of a list. If no data is passed then a default router will be
+# created. There are multiple combinations of router sharding. The one described
+# below supports routers on separate nodes.
+#openshift_hosted_routers:
+#- name: router1
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router1
+# images: "openshift3/ose-${component}:${version}"
+# edits: []
+# certificates:
+# certfile: /path/to/certificate/abc.crt
+# keyfile: /path/to/certificate/abc.key
+# cafile: /path/to/certificate/ca.crt
+#- name: router2
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router2
+# images: "openshift3/ose-${component}:${version}"
+# certificates:
+# certfile: /path/to/certificate/xyz.crt
+# keyfile: /path/to/certificate/xyz.key
+# cafile: /path/to/certificate/ca.crt
+# edits:
+# # ROUTE_LABELS sets the router to listen for routes
+# # tagged with the provided values
+# - key: spec.template.spec.containers[0].env
+# value:
+# name: ROUTE_LABELS
+# value: "route=external"
+# action: append
+#
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
@@ -484,8 +539,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# list of options please see roles/openshift_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to https://kibana.{{ openshift_master_default_subdomain }}
-#openshift_master_logging_public_url=https://kibana.example.com
+# to kibana.{{ openshift_master_default_subdomain }}
+#openshift_master_logging_public_url=kibana.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
#openshift_hosted_logging_elasticsearch_cluster_size=1
@@ -587,7 +642,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure nodeIP in the node config
# This is needed in cases where node traffic is desired to go over an
# interface other than the default network interface.
-#openshift_node_set_node_ip=True
+#openshift_set_node_ip=True
# Force setting of system hostname when configuring OpenShift
# This works around issues related to installations that do not have valid dns
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 3da9be081..3b9861a1d 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -30,17 +30,17 @@ deployment_type=openshift-enterprise
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.4
+openshift_release=v3.5
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.2.0.46
+#openshift_image_tag=v3.5.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.2.0.46
+#openshift_pkg_version=-3.5.0
# Install the openshift examples
#openshift_install_examples=true
@@ -89,6 +89,8 @@ openshift_release=v3.4
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
+# Specify exact version of etcd to configure or upgrade to.
+# etcd_version="3.1.0"
# Upgrade Hooks
#
@@ -263,6 +265,15 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Override master servingInfo.maxRequestsInFlight
#openshift_master_max_requests_inflight=500
+# Override master and node servingInfo.minTLSVersion and .cipherSuites
+# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
+# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
+#openshift_master_min_tls_version=VersionTLS12
+#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+#
+#openshift_node_min_tls_version=VersionTLS12
+#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']
@@ -300,6 +311,50 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Disable management of the OpenShift Router
#openshift_hosted_manage_router=false
+#
+# Router sharding support has been added and can be achieved by supplying the correct
+# data to the inventory. The variable to house the data is openshift_hosted_routers
+# and is in the form of a list. If no data is passed then a default router will be
+# created. There are multiple combinations of router sharding. The one described
+# below supports routers on separate nodes.
+#openshift_hosted_routers:
+#- name: router1
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router1
+# images: "openshift3/ose-${component}:${version}"
+# edits: []
+# certificates:
+# certfile: /path/to/certificate/abc.crt
+# keyfile: /path/to/certificate/abc.key
+# cafile: /path/to/certificate/ca.crt
+#- name: router2
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router2
+# images: "openshift3/ose-${component}:${version}"
+# certificates:
+# certfile: /path/to/certificate/xyz.crt
+# keyfile: /path/to/certificate/xyz.key
+# cafile: /path/to/certificate/ca.crt
+# edits:
+# # ROUTE_LABELS sets the router to listen for routes
+# # tagged with the provided values
+# - key: spec.template.spec.containers[0].env
+# value:
+# name: ROUTE_LABELS
+# value: "route=external"
+# action: append
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
@@ -485,8 +540,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# list of options please see roles/openshift_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to https://kibana.{{ openshift_master_default_subdomain }}
-#openshift_master_logging_public_url=https://kibana.example.com
+# to kibana.{{ openshift_master_default_subdomain }}
+#openshift_master_logging_public_url=kibana.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
#openshift_hosted_logging_elasticsearch_cluster_size=1
@@ -588,7 +643,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure nodeIP in the node config
# This is needed in cases where node traffic is desired to go over an
# interface other than the default network interface.
-#openshift_node_set_node_ip=True
+#openshift_set_node_ip=True
# Force setting of system hostname when configuring OpenShift
# This works around issues related to installations that do not have valid dns
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index c03d4eb8f..0cc66d48c 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.5.3
+Version: 3.6.3
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -24,7 +24,6 @@ Requires: tar
Requires: openshift-ansible-docs = %{version}-%{release}
Requires: java-1.8.0-openjdk-headless
Requires: httpd-tools
-Requires: python-ruamel-yaml
Requires: libselinux-python
%description
@@ -66,6 +65,8 @@ cp inventory/byo/* docs/example-inventories/
# openshift-ansible-playbooks install
cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
+# remove contiv playbooks
+rm -rf %{buildroot}%{_datadir}/ansible/%{name}/playbooks/adhoc/contiv
# BZ1330091
find -L %{buildroot}%{_datadir}/ansible/%{name}/playbooks -name lookup_plugins -type l -delete
@@ -73,10 +74,16 @@ find -L %{buildroot}%{_datadir}/ansible/%{name}/playbooks -name filter_plugins -
# openshift-ansible-roles install
cp -rp roles %{buildroot}%{_datadir}/ansible/%{name}/
+# remove contiv role
+rm -rf %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/*
# openshift_master_facts symlinks filter_plugins/oo_filters.py from ansible_plugins/filter_plugins
pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/filter_plugins
ln -sf ../../../../../ansible_plugins/filter_plugins/oo_filters.py oo_filters.py
popd
+# openshift_master_facts symlinks lookup_plugins/oo_option.py from ansible_plugins/lookup_plugins
+pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/lookup_plugins
+ln -sf ../../../../../ansible_plugins/lookup_plugins/oo_option.py oo_option.py
+popd
# openshift-ansible-filter-plugins install
cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/
@@ -263,6 +270,540 @@ Atomic OpenShift Utilities includes
%changelog
+* Fri Mar 17 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.3-1
+- enable docker excluder since the time it is installed (jchaloup@redhat.com)
+
+* Thu Mar 16 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.2-1
+- enable excluders during node/master scaling up (jchaloup@redhat.com)
+- Fixing variable naming for 35 scoping. (kwoodson@redhat.com)
+- Fix get_router_replicas infrastructure node count. (abutcher@redhat.com)
+- Fix containerized openvswitch race (sdodson@redhat.com)
+
+* Thu Mar 16 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.1-1
+- Bump version to 3.6.0 (smunilla@redhat.com)
+- Improve CONTRIBUTING guide with testing tricks (rhcarvalho@gmail.com)
+- Update versions in example inventories (sdodson@redhat.com)
+- Only call excluder playbooks on masters and nodes (sdodson@redhat.com)
+- Since we've decided that we're no longer paying attention to current status
+ remove this as it was toggling things (sdodson@redhat.com)
+- Remove travis notifications (jdetiber@redhat.com)
+- Removing dependency on master facts for master_public_url default
+ (ewolinet@redhat.com)
+- don't assume openshift_upgrade_target is in a form d.d (jchaloup@redhat.com)
+- Cherry picked from #3657 (ewolinet@redhat.com)
+- Revert "Enable docker during installation and upgrade by default"
+ (skuznets@redhat.com)
+- Nuage service account handling by single master
+ (vishal.patil@nuagenetworks.net)
+- Add router svcacct cluster-reader role (rteague@redhat.com)
+- Cherry picking from #3644 (ewolinet@redhat.com)
+- Revert module_utils six for openshift_health_checker (jdetiber@redhat.com)
+- Refactor and remove openshift_serviceaccount (rteague@redhat.com)
+- Fix typo (sdodson@redhat.com)
+- Force to use TLSv1.2 (related to https://github.com/openshift/openshift-
+ ansible/pull/2707) (olivier@openkumo.fr)
+- Raise on dry-run failures. (kwoodson@redhat.com)
+- validate excluders on non-atomic hosts only (jchaloup@redhat.com)
+- enable docker excluder since the time it is installed (jchaloup@redhat.com)
+- cherry picking from #3621 #3614 #3627 (ewolinet@redhat.com)
+- Renaming oadm_manage_node to oc_adm_manage_node (rteague@redhat.com)
+- add 'hawkular/metrics' when updating config (jcantril@redhat.com)
+- update all the masters (jcantril@redhat.com)
+- bug 1430661. Update masterConfig metricsPublicURL on install
+ (jcantril@redhat.com)
+- nuage: Move role back to config (smilner@redhat.com)
+- Fix incorrect comparison when detecting petsets (tbielawa@redhat.com)
+- Removed unused, unwanted, incorrectly committed code. (kwoodson@redhat.com)
+- Minor updates to README_CONTAINER_IMAGE.md (pep@redhat.com)
+- Fix references to openshift_set_node_ip in inventory examples
+ (gskgoskk@gmail.com)
+- Bug 1428711 - [IntService_public_324] ES pod is unable to read
+ searchguard.truststore after upgarde logging from 3.3.1 to 3.5.0
+ (rmeggins@redhat.com)
+- bug 1428249. Use ES hostmount storage if it exists (jcantril@redhat.com)
+- Use ansible.compat.six where possible (jdetiber@redhat.com)
+- Remove debug task (tbielawa@redhat.com)
+- Use six from ansible.module_utils for remote hosts (jdetiber@redhat.com)
+- re-enable excluders if they are enabled after openshift version detection
+ (jchaloup@redhat.com)
+- Allow overriding minTLSVersion and cipherSuites (meggen@redhat.com)
+- extend the excluders to containerized deployment (jchaloup@redhat.com)
+- Fixing the way policies are found. The old method was unreliable. This
+ method searches all and matches on properties. (kwoodson@redhat.com)
+- openshift_excluders depends on openshift_repos (sdodson@redhat.com)
+- add ability to specify an etcd version (mmckinst@umich.edu)
+- Lowering test coverage percentage. (kwoodson@redhat.com)
+- Removing ordereddict. Replaced with sorted keys. (kwoodson@redhat.com)
+- New role (tbielawa@redhat.com)
+- Fixed for linting. (kwoodson@redhat.com)
+- enable excluders by default (jchaloup@redhat.com)
+- ignore the docker excluder status if it is not enabled by a user
+ (jchaloup@redhat.com)
+- Fix pylint/pyflakes errors on master (sdodson@redhat.com)
+- Identify PetSets in 3.4 clusters and fail if any are detected
+ (tbielawa@redhat.com)
+- More logging fixes (ewolinet@redhat.com)
+- Fix for issue 3541 (srampal@cisco.com)
+- Fix to OpenshiftCLIConfig to support an ordereddict. This was breaking test
+ cases. (kwoodson@redhat.com)
+- update excluders to latest, in non-upgrade scenarios do not update; check
+  both available excluder versions are at most the upgrade target version; get
+  excluder status through the status command; make excluder enablement
+  configurable (jchaloup@redhat.com)
+- Adding scripts for building and pushing images (bleanhar@redhat.com)
+- Adding test_oc_adm_router. (kwoodson@redhat.com)
+- Loosely couple docker to iptables service (rteague@redhat.com)
+- Generic message directing people to contact support (sdodson@redhat.com)
+- Fixing plugin, nodeselectors, and secret pull check (ewolinet@redhat.com)
+- Adding into the origin inventory doc. (kwoodson@redhat.com)
+- Add oc_objectvalidator to upgrade check (sdodson@redhat.com)
+- Augmenting documentation for router sharding. (kwoodson@redhat.com)
+- Adding router test. (kwoodson@redhat.com)
+- openshift_facts: ensure system containers deps are installed
+ (gscrivan@redhat.com)
+- Preserve order of Docker registries (eric.mountain@amadeus.com)
+- Updating metrics defaults (ewolinet@redhat.com)
+- Enable coveralls.io (jdetiber@redhat.com)
+- Fix indentation of run_once (sdodson@redhat.com)
+- Update docs for test consolidation and remove the Makefile
+ (jdetiber@redhat.com)
+- Consolidate root/utils tests (jdetiber@redhat.com)
+- Remove dummy setup/teardown methods (rhcarvalho@gmail.com)
+- Clean up test files (rhcarvalho@gmail.com)
+- Remove commented-out test code (rhcarvalho@gmail.com)
+- Make generic OCObjectValidator from OCSDNValidator (mkhan@redhat.com)
+- logging needs openshift_master_facts before openshift_facts
+ (rmeggins@redhat.com)
+- separate out test tool configs from setup.cfg (jdetiber@redhat.com)
+- Dockerfile and docs to run containerized playbooks (pep@redhat.com)
+- Lower test coverage percentage. (kwoodson@redhat.com)
+- Mock runs differently on travis. Fix the mock test params to be ANY.
+ (kwoodson@redhat.com)
+- Fixed the none namespace. Fixed tests with latest loc_oc_binary call.
+ (kwoodson@redhat.com)
+- Updating the namespace param to None. (kwoodson@redhat.com)
+- Regenerated code with latest yedit changes. (kwoodson@redhat.com)
+- Fixed tests to align with new naming. (kwoodson@redhat.com)
+- Fixed docs. Added check for delete failures. Updated namespace to None.
+ (kwoodson@redhat.com)
+- Fixing linters (kwoodson@redhat.com)
+- Adding integration test. Fixed issue with node_selector.
+ (kwoodson@redhat.com)
+- Adding oc_project to lib_openshift. (kwoodson@redhat.com)
+- Remove old commented-out tests (rhcarvalho@gmail.com)
+- Remove redundant assertion (rhcarvalho@gmail.com)
+- Fix test (rhcarvalho@gmail.com)
+- Lint utils/test (rhcarvalho@gmail.com)
+- Rewrap long lines (rhcarvalho@gmail.com)
+- Remove unused argument (rhcarvalho@gmail.com)
+- Remove unused Makefile variables (rhcarvalho@gmail.com)
+- Adding some more logging defaults (ewolinet@redhat.com)
+- node/sdn: make /var/lib/cni persistent to ensure IPAM allocations stick
+ around across node restart (dcbw@redhat.com)
+- BZ1422348 - Don't install python-ruamel-yaml (sdodson@redhat.com)
+- Re-generate modules (sdodson@redhat.com)
+- Only set ownership to etcd for thirdparty datadir (sdodson@redhat.com)
+- Added ports. (kwoodson@redhat.com)
+- Fixed router name to produce 2nd router. (kwoodson@redhat.com)
+- Updated to work with an array of routers. (kwoodson@redhat.com)
+- Adding support for router sharding. (kwoodson@redhat.com)
+- Removing the openshift_master_facts dependency (ewolinet@redhat.com)
+- bug 1420256. Initialize openshift_logging pvc_facts to empty
+ (jcantril@redhat.com)
+- Add oc_adm_policy_user task cluster-role policy (rteague@redhat.com)
+- Correct config for hosted registry (rteague@redhat.com)
+- Fixing checkout for bindings with -binding suffix (jupierce@redhat.com)
+- Leave an empty contiv role directory (sdodson@redhat.com)
+- Updating stdout check for changed_when (ewolinet@redhat.com)
+- test fixes for openshift_certificates_expiry (jdetiber@redhat.com)
+- oadm_policy_group/adm_policy_user module (jupierce@redhat.com)
+- Fail on Atomic if docker is too old (smilner@redhat.com)
+- Remove contiv role and playbook from rpm packages (sdodson@redhat.com)
+- Resolving yamllint errors (ewolinet@redhat.com)
+- Fixed error handling when oc adm ca create-server-cert fails. Fixed a logic
+ error in secure. (kwoodson@redhat.com)
+- removing extra when condition (kwoodson@redhat.com)
+- Removing run_once. (kwoodson@redhat.com)
+- Adding the activeDeadlineSeconds. Removed debug. (kwoodson@redhat.com)
+- Separating routes so logic is simpler. (kwoodson@redhat.com)
+- Defaulting variables properly to avoid undefined route in dict error.
+ (kwoodson@redhat.com)
+- Add v1.3 FIS templates (sdodson@redhat.com)
+- v1.4 Add FIS templates (sdodson@redhat.com)
+- Add FIS templates (sdodson@redhat.com)
+- Removed duplicate host param. (kwoodson@redhat.com)
+- Fixed failures on create when objects exist. (kwoodson@redhat.com)
+- Add ca-bundle.crt to list of certs to synchronize. (abutcher@redhat.com)
+- Do not force custom ca cert deployment. (abutcher@redhat.com)
+- regenerate lib_openshift with yedit exception changes (jdiaz@redhat.com)
+- Adding changed_whens for role, rolebinding, and scc reconciliation based on
+ output from oadm policy command (ewolinet@redhat.com)
+- raise exceptions when walking through object path (jdiaz@redhat.com)
+- logging fluentd filter was renamed to viaq (rmeggins@redhat.com)
+- Add 'persistentVolumeClaim' to volume_info type (rteague@redhat.com)
+- Updating delete/recreate with replace --force. (kwoodson@redhat.com)
+- Fixed logic error. Ensure both svc and dc exist. (kwoodson@redhat.com)
+- Modified base debug statements. Fixed oc_secret debug/verbose flag. Added
+ reencrypt for route. (kwoodson@redhat.com)
+- Adding support for a route with certs and reencrypt. (kwoodson@redhat.com)
+- node: use the new oc_atomic_container module (gscrivan@redhat.com)
+- master: use the new oc_atomic_container module (gscrivan@redhat.com)
+- etcd: use the new oc_atomic_container module (gscrivan@redhat.com)
+- lib_openshift: new module atomic_container (gscrivan@redhat.com)
+- Combined (squashed) commit for all changes related to adding Contiv support
+ into Openshift Ansible. This is the first (beta) release of Contiv with
+ Openshift and is only supported for Openshift Origin + Bare metal deployments
+ at the time of this commit. Please refer to the Openshift and Contiv official
+ documentation for details of the level of support for different features and
+ modes of operation. (srampal@cisco.com)
+- Re-generate lib_openshift (sdodson@redhat.com)
+- Make s3_volume_mount available to set_fact call (smilner@redhat.com)
+- Correct fact creation for pvc (rteague@redhat.com)
+- [oc_obj] Move namespace argument to end of command. (abutcher@redhat.com)
+- Create hosted registry service (rteague@redhat.com)
+- Correct typo in haproxy router collection. (abutcher@redhat.com)
+- Fix issue #3505, add notes about origin upgrade versions support in BYO
+ upgrade README file (contact@stephane-klein.info)
+- Moving replica logic to filter_plugin to fix skipped task variable behavior.
+ (kwoodson@redhat.com)
+- install the latest excluders (jchaloup@redhat.com)
+- openshift_hosted: Update tasks to use oc_ modules (rteague@redhat.com)
+- Rebased. (kwoodson@redhat.com)
+- Fixed indentation (kwoodson@redhat.com)
+- Adding get_env_var to deploymentconfig. (kwoodson@redhat.com)
+- Fixed default variables. Added a fix to generated secret in env var.
+ (kwoodson@redhat.com)
+- Revert "Add centos paas sig common" (sdodson@redhat.com)
+- Fix Quick Installer failed due to a Python method failure
+ (tbielawa@redhat.com)
+- Removed JGroups cert and password generation. (juraci@kroehling.de)
+- Fix symlink to lookup_plugins/oo_option.py (jchaloup@redhat.com)
+- Use 2 and 3 friendly urlparse in oo_filters (smilner@redhat.com)
+- Update v1.5 content (sdodson@redhat.com)
+- Update v1.4 content (sdodson@redhat.com)
+- xPaaS ose-v1.3.6 (sdodson@redhat.com)
+- Prepare for origin moving to OCP version scheme (ccoleman@redhat.com)
+- initialize_openshift_version: handle excluder packages (gscrivan@redhat.com)
+- Add insecure edge termination policy for kibana. (whearn@redhat.com)
+- openshift_logging default to 2 replicas of primary shards
+ (jcantril@redhat.com)
+- Fixing doc for oc_adm_ca_server_cert. (kwoodson@redhat.com)
+- Convert selectattr tests to use 'match' (rteague@redhat.com)
+- Re-generate lib_openshift and lib_utils libraries (sdodson@redhat.com)
+- curator config must be in /etc/curator not /usr/curator (rmeggins@redhat.com)
+- Updated for pylint. Fixed create doc. (kwoodson@redhat.com)
+- Attempt to handle router preparation errors. (kwoodson@redhat.com)
+- Fixing the generate tox tests. (kwoodson@redhat.com)
+- BZ1414276 - Quote ansible_ssh_user when determining group id
+ (sdodson@redhat.com)
+- Moving import to local class. (kwoodson@redhat.com)
+- Added required_together. Added two minor bug fixes for when data is not
+ passed. (kwoodson@redhat.com)
+- fix up ruamel.yaml/pyyaml no-member lint errors (jdetiber@redhat.com)
+- Renamed NotContainerized to NotContainerizedMixin and dropped no-member
+ (smilner@redhat.com)
+- Removed unrequired no-members from yedit and generated code
+ (smilner@redhat.com)
+- Removing reference to oadm. Moved parameter under general params.
+ (kwoodson@redhat.com)
+- adding tag to update_master_config (ewolinet@redhat.com)
+- CloudFront oc_secret contents should be a list (smilner@redhat.com)
+- lib_openshift oc file lookup improvements (jdetiber@redhat.com)
+- roles/lib_openshift: Handle /usr/local/bin/oc with sudo (walters@verbum.org)
+- if no key, cert, cacert, or default_cert is passed then do not pass to oc
+ (kwoodson@redhat.com)
+- Added backup feature. Fixed a bug with reading the certificate and verifying
+ names. Added force option. (kwoodson@redhat.com)
+- Add SDNValidator Module (mkhan@redhat.com)
+- bug 1425321. Default the master api port based on the facts
+ (jcantril@redhat.com)
+- Bug 1420219 - No log entry can be found in Kibana UI after deploying logging
+ stacks with ansible (rmeggins@redhat.com)
+- Address cert expiry parsing review comments (tbielawa@redhat.com)
+- Fix typo (rhcarvalho@gmail.com)
+- Update link to project homepage (rhcarvalho@gmail.com)
+- Implement fake openssl cert classes (tbielawa@redhat.com)
+- Removed oadm_ references in doc. (kwoodson@redhat.com)
+- Remove unused plays (jhadvig@redhat.com)
+- Remove pytest-related dependencies from setup.py (rhcarvalho@gmail.com)
+- Added copy support when modifying cert and key on existence
+ (kwoodson@redhat.com)
+- Small spacing fix. (kwoodson@redhat.com)
+- Updated doc and defined defaults for signer_* (kwoodson@redhat.com)
+- Removed unused code. Made tests executable. (kwoodson@redhat.com)
+- Removing cmd, fixed docs and comments. (kwoodson@redhat.com)
+- Rename of oadm_ca to oc_adm_ca. Decided to whittle down to the direct call,
+ server_cert. (kwoodson@redhat.com)
+- Fixing doc. (kwoodson@redhat.com)
+- Adding oadm_ca to lib_openshift. (kwoodson@redhat.com)
+- Fixing docs. Fixed default_cert suggestion. (kwoodson@redhat.com)
+- Renamed modules, fixed docs, renamed variables, and cleaned up logic.
+ (kwoodson@redhat.com)
+- Renaming registry and router roles to oc_adm_ (kwoodson@redhat.com)
+- Fixing registry doc and suggestions. (kwoodson@redhat.com)
+- Adding router and registry to lib_openshift. (kwoodson@redhat.com)
+- bug 142026. Ensure Ops PVC prefix is initialized to empty when ops is
+  enabled (jcantril@redhat.com)
+- Reverting logic for verify api handler to be uniform with other ways we
+ verify, will be uniformly updated in future (ewolinet@redhat.com)
+- bug 1417261. Quote name and secrets in logging templates
+ (jcantril@redhat.com)
+- openshift_facts: handle 'latest' version (gscrivan@redhat.com)
+- Surrounding node selector values with quotes (ewolinet@redhat.com)
+- Raise the bar on coverage requirements (rhcarvalho@gmail.com)
+- Accept extra positional arguments in tox (rhcarvalho@gmail.com)
+- Replace nose with pytest (utils) (rhcarvalho@gmail.com)
+- Clean up utils/README.md (rhcarvalho@gmail.com)
+- Replace nose with pytest (rhcarvalho@gmail.com)
+- Extract assertion common to all tests as function (rhcarvalho@gmail.com)
+- Replace nose yield-style tests w/ pytest fixtures (rhcarvalho@gmail.com)
+- Configure pytest to run tests and coverage (rhcarvalho@gmail.com)
+- Fix validation of generated code (rhcarvalho@gmail.com)
+- Make tests run with either nosetests or pytest (rhcarvalho@gmail.com)
+- Replace assert_equal with plain assert (rhcarvalho@gmail.com)
+- Make usage of short_version/release consistent (rhcarvalho@gmail.com)
+- Reorganize tests and helper functions logically (rhcarvalho@gmail.com)
+- Remove test duplication (rhcarvalho@gmail.com)
+- Move similar test cases together (rhcarvalho@gmail.com)
+- Insert paths in the second position of sys.path (rhcarvalho@gmail.com)
+- Rename test for consistency (rhcarvalho@gmail.com)
+- Replace has_key in new modules (smilner@redhat.com)
+- Fix symlink to filter_plugins/oo_filters.py (jchaloup@redhat.com)
+- Correct logic test for running pods (rteague@redhat.com)
+- Temporarily lower the bar for minimum coverage (rhcarvalho@gmail.com)
+- Unset exec bit in tests, add missing requirements (jdetiber@redhat.com)
+- Include missing unit tests to test runner config (rhcarvalho@gmail.com)
+- Fix tests on Python 3 (rhcarvalho@gmail.com)
+- Remove dead code in installer (rhcarvalho@gmail.com)
+- Remove dead code (rhcarvalho@gmail.com)
+- Document how to find dead Python code (rhcarvalho@gmail.com)
+- updating until statements on uri module for api verification
+ (ewolinet@redhat.com)
+- add dependency on openshift_repos (sdodson@redhat.com)
+- Fixing a bug by removing default debug (kwoodson@redhat.com)
+- Updating to use uri module instead (ewolinet@redhat.com)
+- Updating node playbooks to use oc_obj (rteague@redhat.com)
+- Add centos paas sig common (sdodson@redhat.com)
+- Disentangle openshift_repos from openshift_facts (sdodson@redhat.com)
+- Adding missing handler to resolve error that it was not found
+ (ewolinet@redhat.com)
+- String compatibility for python2,3 (kwoodson@redhat.com)
+- Fix indenting/ordering in router cert redeploy (sdodson@redhat.com)
+- post_control_plane.yml: don't fail on grep (gscrivan@redhat.com)
+- facts/main: Require Python 3 for Fedora, Python 2 everywhere else
+ (walters@verbum.org)
+- Fix typo, add symlinks for roles (sdodson@redhat.com)
+- Resolve deprecation warning (rteague@redhat.com)
+- Revert temporary hack to skip router/registry upgrade. (dgoodwin@redhat.com)
+- Don't attempt to install python-ruamel-yaml on atomic (sdodson@redhat.com)
+- Pleasing the linting gods. (kwoodson@redhat.com)
+- Fixed tests for pyyaml vs ruamel. Added import logic. Fixed safe load.
+ (kwoodson@redhat.com)
+- update example templates+imagestreams (bparees@redhat.com)
+- Adding fallback support for pyyaml. (kwoodson@redhat.com)
+- bug 1420217. Default ES memory to be comparable to 3.4 deployer
+ (jcantril@redhat.com)
+- Register cloudfront privkey when required (smilner@redhat.com)
+- initialize oo_nodes_to_upgrade group when running control plane upgrade only
+ (jchaloup@redhat.com)
+- adding some quotes for safety (ewolinet@redhat.com)
+- Revert "Add block+when skip to `openshift_facts` tasks" (abutcher@redhat.com)
+- Add missing full hostname for the Hawkular Metrics certificate (BZ1421060)
+ Fix issue where the signer certificate's name is static, preventing
+ redeployments from being acceptable. (mwringe@redhat.com)
+- fixing use of oc_scale module (ewolinet@redhat.com)
+- fixing default for logging (ewolinet@redhat.com)
+- Fix some lint (jdetiber@redhat.com)
+- Fixed issue where upgrade fails when using daemon sets (e.g. aggregated
+ logging) (adbaldi+ghub@gmail.com)
+- upgrades: fix path to disable_excluder.yml (jchaloup@redhat.com)
+- Add upgrade job step after the entire upgrade performs (maszulik@redhat.com)
+- Ansible Lint cleanup and making filter/lookup plugins used by
+ openshift_master_facts available within the role (jdetiber@redhat.com)
+- Update variant_version (smilner@redhat.com)
+- Add block+when skip to `openshift_facts` tasks (tbielawa@redhat.com)
+- Trying to fix up/audit note some changes (tbielawa@redhat.com)
+- updating defaults for logging and metrics roles (ewolinet@redhat.com)
+- Fix logic for checking docker-registry (rteague@redhat.com)
+- node, vars/main.yml: define l_is_ha and l_is_same_version
+ (gscrivan@redhat.com)
+- Modify playbooks to use oc_obj module (rteague@redhat.com)
+- master, vars/main.yml: define l_is_ha and l_is_same_version
+ (gscrivan@redhat.com)
+- oc route commands now using the oc_route module (smilner@redhat.com)
+- Modify playbooks to use oc_label module (rteague@redhat.com)
+- Fix cases where child classes override OpenShiftCLI values
+ (jdetiber@redhat.com)
+- BZ1421860: increase Heapster's metric resolution to 30s (mwringe@redhat.com)
+- BZ1421834: increase the Heapster metric resolution to 30s
+ (mwringe@redhat.com)
+- Fix Bug 1419654 Remove legacy config_base fallback to /etc/openshift
+ (sdodson@redhat.com)
+- Modify playbooks to use oadm_manage_node module (rteague@redhat.com)
+- Removing trailing spaces (esauer@redhat.com)
+- Removed adhoc s3_registry (smilner@redhat.com)
+- replace 'oc service' command with its lib_openshift equivalent
+ (jchaloup@redhat.com)
+- Making router pods scale with infra nodes (esauer@redhat.com)
+- Provisioning of nfs share and PV for logging ops (efreiber@redhat.com)
+- Add libselinux-python dependency for localhost (sdodson@redhat.com)
+- oc secrets now done via oc_secret module (smilner@redhat.com)
+- More fixes for reboot/wait for hosts. (dgoodwin@redhat.com)
+- fix openshift_logging where defaults filter needs quoting
+ (jcantril@redhat.com)
+- Do not hard code package names (rhcarvalho@gmail.com)
+- Refactor code to access values from task_vars (rhcarvalho@gmail.com)
+- oc serviceaccount now done via oc_serviceaccount module (smilner@redhat.com)
+- bug 1420229. Bounce metrics components to recognize changes on updates or
+ upgrades (jcantril@redhat.com)
+- node: simplify when conditionals (gscrivan@redhat.com)
+- openvswitch: simplify when conditionals (gscrivan@redhat.com)
+- uninstall: delete master-api and master-controllers (gscrivan@redhat.com)
+- master: support HA deployments with system containers (gscrivan@redhat.com)
+- Ensure etcd client certs are regenerated with embedded etcd.
+ (abutcher@redhat.com)
+- bug 1420425. Allow setting of public facing certs for kibana in
+ openshift_logging role (jcantril@redhat.com)
+- bug 1399523. Ops pvc should have different prefix from non-ops for
+ openshift_logging (jcantril@redhat.com)
+- Include rpm/git paths in expiry README. (tbielawa@redhat.com)
+- Fixing docs, linting, and comments. (kwoodson@redhat.com)
+- fix bug 1420204. Default openshift_logging_use_journal to empty so fluentd
+ detects and is consistent with deployer (jcantril@redhat.com)
+- Let pylint use as many CPUs as available (rhcarvalho@gmail.com)
+- Add note about extraneous virtualenvs (rhcarvalho@gmail.com)
+- Document how to create new checks (rhcarvalho@gmail.com)
+- Introduce tag notation for checks (rhcarvalho@gmail.com)
+- Replace multi-role checks with action plugin (rhcarvalho@gmail.com)
+- Removing the /usr/bin/ansible-playbook dependency in the spec file
+  (mwoodson@redhat.com)
+- use the correct name for the ruamel-yaml python module (jchaloup@redhat.com)
+- Reword module documentation (rhcarvalho@gmail.com)
+- Separate import groups with a blank line (rhcarvalho@gmail.com)
+- Remove commented-out debugging code (rhcarvalho@gmail.com)
+- Replace service account secrets handling with oc_serviceaccount_secret module
+ (jchaloup@redhat.com)
+- node: refactor Docker container tasks in a block (gscrivan@redhat.com)
+- etcd: use as system container (gscrivan@redhat.com)
+- Implement uninstall for system containers (gscrivan@redhat.com)
+- system-containers: implement idempotent update (gscrivan@redhat.com)
+- atomic-openshift: install as a system container (gscrivan@redhat.com)
+- make sure cluster_size is an int for arith. ops (rmeggins@redhat.com)
+- Bug 1420234 - illegal_argument_exception in Kibana UI. (rmeggins@redhat.com)
+- bug 1420538. Allow users to set supplementalGroup for Cassandra
+ (jcantril@redhat.com)
+- Document openshift_cockpit_deployer_prefix and add
+ openshift_cockpit_deployer_version (sdodson@redhat.com)
+- Make the cert expiry playbooks runnable (tbielawa@redhat.com)
+- Ensure embedded etcd config uses CA bundle. (abutcher@redhat.com)
+- bug 1420684. On logging upgrade use the correct value for namespace
+ (jcantril@redhat.com)
+- Fixing docs. (kwoodson@redhat.com)
+- bug 1419962. fix openshift_metrics pwd issue after reinstall where cassandra
+ has incorrect pwd exception (jcantril@redhat.com)
+- Fixing for linters. (kwoodson@redhat.com)
+- Adding test cases. (kwoodson@redhat.com)
+- Fixing docs. (kwoodson@redhat.com)
+- oc process (ihorvath@redhat.com)
+- node: ensure conntrack-tools is installed (gscrivan@redhat.com)
+- Updating defaults to pull from previously defined variable names used in
+ playbooks (ewolinet@redhat.com)
+- Pleasing the linting bot. (kwoodson@redhat.com)
+- fixup! master: latest use same predicates as last version
+ (gscrivan@redhat.com)
+- fixup! master: latest use same priorities as last version
+ (gscrivan@redhat.com)
+- Adding integration tests. (kwoodson@redhat.com)
+- Set image change triggers to auto=true for OCP 3.4 - for v1.5
+ (simaishi@redhat.com)
+- Reference class instead of self.__class__ within super constructor to avoid
+ calling self forever. (abutcher@redhat.com)
+- Adding oc_env to lib_openshift. (kwoodson@redhat.com)
+- Fixing for flake8 spacing. (kwoodson@redhat.com)
+- Fixing tests for linters. (kwoodson@redhat.com)
+- Adding port support for route. (kwoodson@redhat.com)
+- use pvc_size instead of pv_size for openshift_metrics since the role creates
+ claims (jcantril@redhat.com)
+- Added temporary kubeconfig file. Fixed tests to coincide with tmpfile.
+ (kwoodson@redhat.com)
+- Set image change triggers to auto=true for OCP 3.4
+ (https://github.com/ManageIQ/manageiq-pods/pull/88) (simaishi@redhat.com)
+- fixes 1419839. Install only heapster for openshift_metrics when heapster
+ standalone flag is set (jcantril@redhat.com)
+- Adding code to copy kubeconfig before running oc commands.
+ (kwoodson@redhat.com)
+- master: latest use same predicates as last version (gscrivan@redhat.com)
+- master: latest use same priorities as last version (gscrivan@redhat.com)
+- Changed lib_openshift to use real temporary files. (twiest@redhat.com)
+- Fixed ansible module unit and integration tests and added runners.
+ (twiest@redhat.com)
+- Moving to ansible variable. (kwoodson@redhat.com)
+- Specifying port for wait_for call. (kwoodson@redhat.com)
+- Reverting commit 3257 and renaming master_url to openshift_logging_master_url
+ (ewolinet@redhat.com)
+- [openshift_ca] Reference client binary from openshift_ca_host.
+ (abutcher@redhat.com)
+- Fix playbooks/byo/openshift_facts.yml include path (sdodson@redhat.com)
+- Add missing symlink to roles (rhcarvalho@gmail.com)
+- Bump registry-console to 3.5 (sdodson@redhat.com)
+- Added oc_serviceaccount_secret to lib_openshift. (twiest@redhat.com)
+- fix 1406057. Allow openshift_metrics nodeselectors for components
+ (jcantril@redhat.com)
+- Use service annotations to redeploy router service serving cert signer cert.
+ (abutcher@redhat.com)
+- Move excluder disablement into control plane and node upgrade playbooks
+ (sdodson@redhat.com)
+- Add excluder management to upgrade and config playbooks (sdodson@redhat.com)
+- Add openshift_excluder role (sdodson@redhat.com)
+- Fix RHEL Subscribe std_include path (tbielawa@redhat.com)
+- Copies CloudFront pem file to registry hosts (smilner@redhat.com)
+- Remove legacy router/registry certs and client configs from synchronized
+ master certs. (abutcher@redhat.com)
+- Bump registry to 3.4 (sdodson@redhat.com)
+- Sync latest image stream content (sdodson@redhat.com)
+- Support latest for containerized version (gscrivan@redhat.com)
+- Ensure python2-ruamel-yaml is installed (sdodson@redhat.com)
+- openshift_logging link pull secret to serviceaccounts fix unlabel when
+ undeploying (jcantril@redhat.com)
+- fixes 1414625. Fix check of keytool in openshift_metrics role
+ (jcantril@redhat.com)
+- Doc enhancements. (kwoodson@redhat.com)
+- fixes 1417261. Points playbooks to the correct 3.5 roles for logging and
+ metrics (jcantril@redhat.com)
+- Change default docker log driver from json-file to journald.
+ (abutcher@redhat.com)
+- Add logic to verify patched version of Ansible (rteague@redhat.com)
+- Restructure certificate redeploy playbooks (abutcher@redhat.com)
+- Temporary hack to skip router/registry upgrade. (dgoodwin@redhat.com)
+- Fixing linters. (kwoodson@redhat.com)
+- run node upgrade if master is a node as part of the control plane upgrade
+  only (jchaloup@redhat.com)
+- Appease yamllint (sdodson@redhat.com)
+- Adding include_role to block to resolve when eval (ewolinet@redhat.com)
+- Updating oc_apply to use command instead of shell (ewolinet@redhat.com)
+- Wrap openshift_hosted_logging include_role within a block.
+ (abutcher@redhat.com)
+- Adding unit test. Fixed redundant calls to get. (kwoodson@redhat.com)
+- Fixing doc and generating new label with updated base. (kwoodson@redhat.com)
+- oc_label ansible module (jdiaz@redhat.com)
+- Fixing copy pasta comments. Fixed required in docs. (kwoodson@redhat.com)
+- Fix openshift_hosted_logging bool typo. (abutcher@redhat.com)
+- Updating oc_apply changed_when conditions, fixing filter usage for
+ openshift_hosted_logging playbook (ewolinet@redhat.com)
+- Add default ansible.cfg file (rteague@redhat.com)
+- Move current node upgrade tasks under openshift_node_upgrade role
+ (jchaloup@redhat.com)
+- Fix host when waiting for a master system restart. (dgoodwin@redhat.com)
+- Adding bool filter to when openshift_logging_use_ops evals and updating
+ oc_apply to handle trying to update immutable fields (ewolinet@redhat.com)
+- Fixing for tox tests. (flake8|pylint) (kwoodson@redhat.com)
+- Adding unit test for oc_service. Added environment fix for non-standard oc
+ installs. (kwoodson@redhat.com)
+- Adding integration tests. (kwoodson@redhat.com)
+- Adding oc_service to lib_openshift. (kwoodson@redhat.com)
+- Sync etcd ca certs from etcd_ca_host to other etcd hosts
+ (jawed.khelil@amadeus.com)
+
* Tue Jan 31 2017 Scott Dodson <sdodson@redhat.com> 3.5.3-1
- Adding bool filter to ensure that we correctly set ops host for fluentd
(ewolinet@redhat.com)
diff --git a/playbooks/adhoc/contiv/delete_contiv.yml b/playbooks/adhoc/contiv/delete_contiv.yml
new file mode 100644
index 000000000..91948c72e
--- /dev/null
+++ b/playbooks/adhoc/contiv/delete_contiv.yml
@@ -0,0 +1,29 @@
+---
+- name: delete contiv
+ hosts: all
+ gather_facts: False
+ tasks:
+ - systemd:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - contiv-etcd
+ - netmaster
+ - netplugin
+ - openvswitch
+ ignore_errors: True
+ - file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /opt/cni
+ - /opt/contiv
+ - /etc/systemd/system/netmaster.service
+ - /etc/systemd/system/netplugin.service
+ - /etc/systemd/system/contiv-etcd.service
+ - /etc/systemd/system/contiv-etcd.service.d
+ - /var/lib/contiv-etcd
+ - /etc/default/netmaster
+ - /etc/default/netplugin
+ - /etc/openvswitch/conf.db
+  - systemd:
+      daemon_reload: True
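Usage note: this ad-hoc cleanup playbook targets `hosts: all`, so run it against a scoped inventory, e.g. `ansible-playbook -i <contiv-hosts-inventory> playbooks/adhoc/contiv/delete_contiv.yml`; it stops the Contiv services and deletes their on-disk state.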
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index e5b80a9b4..0425ba518 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,5 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are
provided in their respective directories.
# Upgrades available
-- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md)
-- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md)
+- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md) (also works to upgrade OpenShift Origin from 1.4.x to 1.5.x)
+- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md) (also works to upgrade OpenShift Origin from 1.3.x to 1.4.x)
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 4ee6afe2a..304559f6e 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -28,7 +28,7 @@
tasks:
- name: Mark node unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -51,7 +51,7 @@
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index b1510e062..d268850d8 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -46,7 +46,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index b61d9e58a..d11e51640 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -54,7 +54,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index f0b2a2c75..5a0f143ac 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -47,7 +47,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 82a1d0935..25d8cd2ba 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -46,7 +46,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 7ae1b3e6e..d52f3c111 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -54,7 +54,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index ec63ea60e..07c734a40 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -47,7 +47,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
index 69cabcd33..86f5a36ca 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -46,10 +46,12 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
+# Note: During an upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
+# So it is necessary to run that play after running disable_excluder.yml.
- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
tags:
- pre_upgrade
@@ -82,6 +84,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 719057d2b..a2f1cd2b1 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -54,7 +54,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
@@ -90,6 +90,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index 259be6f8e..f858de3d5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -47,7 +47,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 113b401f9..ff4c4b0d7 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -27,6 +27,9 @@
when: openshift_docker_selinux_enabled is not defined
- include: disable_excluder.yml
+ vars:
+    # the excluders need to be disabled no matter what the status says
+ with_status_check: false
tags:
- always
@@ -57,3 +60,7 @@
- include: openshift_hosted.yml
tags:
- hosted
+
+- include: reset_excluder.yml
+ tags:
+ - always
diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml
index eb146bab8..68bffb5f5 100644
--- a/playbooks/common/openshift-cluster/disable_excluder.yml
+++ b/playbooks/common/openshift-cluster/disable_excluder.yml
@@ -1,11 +1,17 @@
---
- name: Record excluder state and disable
- hosts: l_oo_all_hosts
+ hosts: oo_masters_to_config:oo_nodes_to_config
gather_facts: no
tasks:
+
+  # During installation the excluders are installed with the 'present' state,
+  # so there is no pre-validation check here: the excluders are either going to
+  # be installed ('present' installs the latest version when absent) or left
+  # untouched if they are already installed.
+
+ # disable excluders based on their status
- include_role:
name: openshift_excluder
- tasks_from: status
- - include_role:
- name: openshift_excluder
- tasks_from: unexclude
+ tasks_from: disable
+ vars:
+ openshift_excluder_package_state: present
+ docker_excluder_package_state: present
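For context, `tasks_from: disable`/`enable` in the openshift_excluder role ultimately toggle the yum excludes managed by the `*-excluder` packages. A minimal sketch of the equivalent manual operation, assuming the helper script names conventionally shipped by those packages (`atomic-openshift-excluder`, `atomic-openshift-docker-excluder`; these names are not taken from this diff):

```yaml
# Sketch only; the openshift_excluder role tasks are the supported path.
- name: Lift the OpenShift package excludes so installs/updates can proceed
  command: atomic-openshift-excluder unexclude

- name: Re-apply the excludes once configuration work is finished
  command: atomic-openshift-excluder exclude
```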
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 07b38920f..1f74e929f 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -18,6 +18,18 @@
msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+# TODO(jchaloup): find a different way to make repoquery --qf '%{version}' atomic-openshift work without disabling the excluders
+- include: disable_excluder.yml
+ vars:
+    # the excluders need to be disabled no matter what the status says
+ with_status_check: false
+    # Only the openshift excluder needs to be temporarily disabled,
+    # so ignore the docker one
+ enable_docker_excluder: false
+ tags:
+ - always
+ when: openshift_upgrade_target is not defined
+
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
@@ -32,3 +44,13 @@
openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
roles:
- openshift_version
+
+  # Re-enable the excluders if they are meant to be enabled (installation only; an upgrade disables the excluders before this play)
+- include: reset_excluder.yml
+ vars:
+    # Only the openshift excluder needs to be re-enabled,
+    # so ignore the docker one
+ enable_docker_excluder: false
+ tags:
+ - always
+ when: openshift_upgrade_target is not defined
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 06cda36a5..5db71b857 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -53,6 +53,8 @@
pre_tasks:
- set_fact:
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ - set_fact:
+ openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
tasks:
- block:
@@ -60,3 +62,9 @@
name: openshift_logging
tasks_from: update_master_config
when: openshift_hosted_logging_deploy | default(false) | bool
+
+ - block:
+ - include_role:
+ name: openshift_metrics
+ tasks_from: update_master_config
+ when: openshift_hosted_metrics_deploy | default(false) | bool
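Both `update_master_config` blocks above are gated on inventory toggles. A hedged `group_vars` sketch that would activate them (these variable names appear in the `when:` conditions above):

```yaml
# Enable the hosted logging and metrics master-config updates
openshift_hosted_logging_deploy: true
openshift_hosted_metrics_deploy: true
```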
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
index 9d4d3ea26..cbb4a2434 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
@@ -31,7 +31,7 @@
- name: Generate new etcd CA
hosts: oo_first_etcd
roles:
- - role: etcd_ca
+ - role: openshift_etcd_ca
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
@@ -294,7 +294,7 @@
client_path: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
ca_path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- name: Lookup default group for ansible_ssh_user
- command: "/usr/bin/id -g {{ ansible_ssh_user }}"
+ command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}"
changed_when: false
register: _ansible_ssh_user_gid
- set_fact:
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index 35eedd5ee..a7b614341 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -51,7 +51,7 @@
name: router-certs
namespace: default
state: absent
- run_once: true
+ run_once: true
- name: Remove router service annotations
command: >
diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml
index fe86f4c23..eaa8ce39c 100644
--- a/playbooks/common/openshift-cluster/reset_excluder.yml
+++ b/playbooks/common/openshift-cluster/reset_excluder.yml
@@ -1,8 +1,8 @@
---
- name: Re-enable excluder if it was previously enabled
- hosts: l_oo_all_hosts
+ hosts: oo_masters_to_config:oo_nodes_to_config
gather_facts: no
tasks:
- include_role:
name: openshift_excluder
- tasks_from: reset
+ tasks_from: enable
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
new file mode 100644
index 000000000..d1e431c5e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
@@ -0,0 +1,21 @@
+---
+- name: Record excluder state and disable
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ gather_facts: no
+ tasks:
+ - include: pre/validate_excluder.yml
+ vars:
+ #repoquery_cmd: repoquery_cmd
+ #openshift_upgrade_target: openshift_upgrade_target
+ excluder: "{{ item }}"
+ with_items:
+ - "{{ openshift.common.service_type }}-docker-excluder"
+ - "{{ openshift.common.service_type }}-excluder"
+
+ # disable excluders based on their status
+ - include_role:
+ name: openshift_excluder
+ tasks_from: disable
+ vars:
+ openshift_excluder_package_state: latest
+ docker_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 01c1e0c15..6f096f705 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -22,7 +22,7 @@
selector: 'router'
register: all_routers
- - set_fact: haproxy_routers="{{ (all_routers.reults.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
when:
- all_routers.results.returncode == 0
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
new file mode 100644
index 000000000..6de1ed061
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
@@ -0,0 +1,29 @@
+---
+# input variables:
+# - repoquery_cmd
+# - excluder
+# - openshift_upgrade_target
+- block:
+ - name: Get available excluder version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}"
+ register: excluder_version
+ failed_when: false
+ changed_when: false
+
+  - name: Excluder version detected
+ debug:
+ msg: "{{ excluder }}: {{ excluder_version.stdout }}"
+
+ - name: Printing upgrade target version
+ debug:
+ msg: "{{ openshift_upgrade_target }}"
+
+  - name: Check that the available {{ excluder }} version is at most the upgrade target version
+ fail:
+ msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version"
+ when:
+    - excluder_version.stdout != ''
+    - excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True)
+ when:
+ - not openshift.common.is_atomic | bool
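The guard above only compares the major.minor prefix of each version. A small illustration of that filter chain, with purely hypothetical values (an available excluder at 3.6.0 against an upgrade target of 3.5):

```yaml
- name: Illustrate the major.minor comparison used by validate_excluder.yml
  debug:
    # '3.6.0' is trimmed to '3.6', which is '>' than '3.5', so the check would fail the upgrade
    msg: "{{ '3.6.0'.split('.')[0:2] | join('.') | version_compare('3.5', '>', strict=True) }}"
```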
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index fd01a6625..e16a1f6d0 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -173,7 +173,11 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --additive-only=true --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm -o name
+ register: reconcile_cluster_role_result
+ changed_when:
+ - reconcile_cluster_role_result.stdout != ''
+ - reconcile_cluster_role_result.rc == 0
run_once: true
- name: Reconcile Cluster Role Bindings
@@ -184,19 +188,31 @@
--exclude-groups=system:authenticated:oauth
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
- --additive-only=true --confirm
+ --additive-only=true --confirm -o name
when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ register: reconcile_bindings_result
+ changed_when:
+ - reconcile_bindings_result.stdout != ''
+ - reconcile_bindings_result.rc == 0
run_once: true
- name: Reconcile Jenkins Pipeline Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
run_once: true
+ register: reconcile_jenkins_role_binding_result
+ changed_when:
+ - reconcile_jenkins_role_binding_result.stdout != ''
+ - reconcile_jenkins_role_binding_result.rc == 0
when: openshift.common.version_gte_3_4_or_1_4 | bool
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
+ {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name
+ register: reconcile_scc_result
+ changed_when:
+ - reconcile_scc_result.stdout != ''
+ - reconcile_scc_result.rc == 0
run_once: true
- set_fact:
@@ -246,7 +262,7 @@
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -268,7 +284,7 @@
post_tasks:
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
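The reconcile tasks above all follow one pattern: append `-o name` so the command prints only the objects it actually modified, then drive `changed_when` off a non-empty stdout. A generic sketch of the pattern (the command is a hypothetical placeholder, not from this diff):

```yaml
- name: Report a change only when the command actually modified something
  command: /usr/local/bin/some-idempotent-command -o name  # hypothetical placeholder
  register: result
  changed_when:
    - result.stdout != ''
    - result.rc == 0
```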
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 4e1838c71..e9f894942 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -15,7 +15,7 @@
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -37,7 +37,7 @@
post_tasks:
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
new file mode 100644
index 000000000..ae63c9ca9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
@@ -0,0 +1,67 @@
+---
+###############################################################################
+# Pre-upgrade checks for known data problems; if this playbook fails you should
+# contact support. If you're not supported contact users@lists.openshift.com
+#
+# oc_objectvalidator provides these two checks
+# 1 - SDN Data issues, never seen in the wild but known possible due to code audits
+# https://github.com/openshift/origin/issues/12697
+# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934
+#
+###############################################################################
+- name: Verify 3.5 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
+
+ # What's all this PetSet business about?
+ #
+  # 'PetSets' were ALPHA resources in Kubernetes 1.4 (OCP <= 3.4). In
+  # Kubernetes 1.5 (OCP 3.5) they are no longer supported; the BETA
+  # resource 'StatefulSets' replaces them. We can't migrate clients'
+  # PetSets to StatefulSets. Additionally, Red Hat has never officially
+  # supported these resource types. Sorry users, but if you were using
+  # unsupported resources from the Kube documentation then we can't
+  # help you at this time.
+ #
+ # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229
+ - name: Check if legacy PetSets exist
+ oc_obj:
+ state: list
+ all_namespaces: true
+ kind: petsets
+ register: l_do_petsets_exist
+
+ - name: Fail on unsupported resource migration 'PetSets'
+ fail:
+ msg: >
+ PetSet objects were detected in your cluster. These are an
+ Alpha feature in upstream Kubernetes 1.4 and are not supported
+ by Red Hat. In Kubernetes 1.5, they are replaced by the Beta
+ feature StatefulSets. Red Hat currently does not offer support
+ for either PetSets or StatefulSets.
+
+ Automatically migrating PetSets to StatefulSets in OpenShift
+ Container Platform (OCP) 3.5 is not supported. See the
+ Kubernetes "Upgrading from PetSets to StatefulSets"
+ documentation for additional information:
+
+ https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/
+
+ PetSets MUST be removed before upgrading to OCP 3.5. Red Hat
+ strongly recommends reading the above referenced documentation
+ in its entirety before taking any destructive actions.
+
+ If you want to simply remove all PetSets without manually
+ migrating to StatefulSets, run this command as a user with
+ cluster-admin privileges:
+
+ $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false
+ when:
+ # Search did not fail, valid resource type found
+ - l_do_petsets_exist.results.returncode == 0
+ # Items do exist in the search results
+ - l_do_petsets_exist.results.results.0['items'] | length > 0
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 7a334e771..68b9db03a 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -127,6 +127,8 @@
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
+ - role: nuage_master
+ when: openshift.common.use_nuage | bool
post_tasks:
- name: Create group for deployment type
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 18e5c665f..c59747081 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -60,8 +60,19 @@
- openshift_facts
- openshift_docker
+- include: ../openshift-cluster/disable_excluder.yml
+ vars:
+    # the excluders need to be disabled no matter what the status says
+ with_status_check: false
+ tags:
+ - always
+
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
- include: ../openshift-node/config.yml
+
+- include: ../openshift-cluster/reset_excluder.yml
+ tags:
+ - always
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 933fd584d..6c5a299c1 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -84,6 +84,9 @@
when: openshift.common.use_flannel | bool
- role: nuage_node
when: openshift.common.use_nuage | bool
+ - role: contiv
+ contiv_role: netplugin
+ when: openshift.common.use_contiv | bool
- role: nickhammond.logrotate
- role: openshift_manage_node
openshift_master_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
index bb3b1e780..d81bd152e 100644
--- a/playbooks/common/openshift-node/scaleup.yml
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -27,4 +27,15 @@
- openshift_facts
- openshift_docker
+- include: ../openshift-cluster/disable_excluder.yml
+ vars:
+    # the excluders need to be disabled no matter what the status says
+ with_status_check: false
+ tags:
+ - always
+
- include: ../openshift-node/config.yml
+
+- include: ../openshift-cluster/reset_excluder.yml
+ tags:
+ - always
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 000000000..502fd1f46
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,16 @@
+[pytest]
+norecursedirs =
+ .*
+ __pycache__
+ cover
+ docs
+python_files =
+ # TODO(rhcarvalho): rename test files to follow a single pattern. "test*.py"
+ # is Python unittest's default, while pytest discovers both "test_*.py" and
+ # "*_test.py" by default.
+ test_*.py
+ *_tests.py
+addopts =
+ --cov=.
+ --cov-report=term
+ --cov-report=html
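With this file in place, a bare `pytest` run from the repository root discovers both `test_*.py` and `*_tests.py` files and, through the coverage flags in `addopts`, prints a terminal coverage summary and writes an HTML report (by default under `htmlcov/`).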
diff --git a/requirements.txt b/requirements.txt
index 5a6a161cb..241313b6f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,6 @@
ansible>=2.2
-six
+click
pyOpenSSL
-PyYAML
-ruamel.yaml
+# We need to disable ruamel.yaml for now because of test failures
+#ruamel.yaml
+six
diff --git a/roles/contiv/README.md b/roles/contiv/README.md
new file mode 100644
index 000000000..fa36039d9
--- /dev/null
+++ b/roles/contiv/README.md
@@ -0,0 +1,39 @@
+## Contiv
+
+Install Contiv components (netmaster, netplugin, contiv_etcd) on master and minion nodes.
+
+## Requirements
+
+* Ansible 2.2
+* CentOS / RHEL
+
+## Current Contiv restrictions when used with OpenShift
+
+* OpenShift Origin only
+* VLAN encap mode only (the default for OpenShift Ansible)
+* Bare metal deployments only
+* Requires additional network configuration on the external physical routers (see the Contiv section of the OpenShift docs)
+
+## Key Ansible inventory configuration parameters
+
+* ``openshift_use_contiv=True``
+* ``openshift_use_openshift_sdn=False``
+* ``os_sdn_network_plugin_name='cni'``
+* ``netmaster_interface=eth0``
+* ``netplugin_interface=eth1``
+* See the Contiv section of the OpenShift docs for more details
+
+## Example bare metal deployment of OpenShift + Contiv
+
+* Example bare metal deployment
+
+![Screenshot](contiv-openshift-vlan-network.png)
+
+* contiv241 is a Master + minion node
+* contiv242 and contiv243 are minion nodes
+* VLANs 1001, 1002 used for contiv container networks
+* VLAN 10 used for cluster-internal host network
+* VLANs added to isolated VRF on external physical switch
+* Static routes added on external switch as shown to allow routing between host and container networks
+* External switch also used for public internet access
+
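Taken together, the inventory parameters listed earlier amount to a handful of group-wide variables. A hedged `group_vars` sketch (interface names are examples; adjust to your hosts):

```yaml
# Mirrors the README's key inventory parameters
openshift_use_contiv: true
openshift_use_openshift_sdn: false
os_sdn_network_plugin_name: cni
netmaster_interface: eth0
netplugin_interface: eth1
```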
diff --git a/roles/contiv/contiv-openshift-vlan-network.png b/roles/contiv/contiv-openshift-vlan-network.png
new file mode 100644
index 000000000..2462fa337
--- /dev/null
+++ b/roles/contiv/contiv-openshift-vlan-network.png
Binary files differ
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
new file mode 100644
index 000000000..1ccae61f2
--- /dev/null
+++ b/roles/contiv/defaults/main.yml
@@ -0,0 +1,106 @@
+---
+# The version of Contiv binaries to use
+contiv_version: 1.0.0-beta.3-02-21-2017.20-52-42.UTC
+
+# The version of cni binaries
+cni_version: v0.4.0
+
+contiv_default_subnet: "20.1.1.1/24"
+contiv_default_gw: "20.1.1.254"
+# TCP port on which Netmaster listens for network connections
+netmaster_port: 9999
+
+# Default for contiv_role
+contiv_role: netmaster
+
+
+# TCP port on which Netplugin listens for network connections
+netplugin_port: 6640
+contiv_rpc_port1: 9001
+contiv_rpc_port2: 9002
+contiv_rpc_port3: 9003
+
+# Interface used by Netplugin for inter-host traffic when encap_mode is vlan.
+# The interface must support 802.1Q trunking.
+netplugin_interface: "eno16780032"
+
+# IP address of the interface used for control communication within the cluster
+# It needs to be reachable from all nodes in the cluster.
+netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+
+# IP used to terminate vxlan tunnels
+netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+
+# Interface used to bind Netmaster service
+netmaster_interface: "{{ netplugin_interface }}"
+
+# Path to the contiv binaries
+bin_dir: /usr/bin
+
+# Path to the contivk8s cni binary
+cni_bin_dir: /opt/cni/bin
+
+# Path to cni archive download directory
+cni_download_dir: /tmp
+
+# URL for cni binaries
+cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download"
+cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tbz2"
+
+
+# Contiv config directory
+contiv_config_dir: /opt/contiv/config
+
+# Directory to store downloaded Contiv releases
+contiv_releases_directory: /opt/contiv
+contiv_current_release_directory: "{{ contiv_releases_directory }}/{{ contiv_version }}"
+
+# The default URL to download the Contiv tarballs from
+contiv_download_url_base: "https://github.com/contiv/netplugin/releases/download"
+contiv_download_url: "{{ contiv_download_url_base }}/{{ contiv_version }}/netplugin-{{ contiv_version }}.tar.bz2"
+
+# This is where kubelet looks for plugin files
+kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec
+
+# Specifies bridged vs. routed forwarding mode for networking (bridge | routing).
+# If you are using an external router for all routing, select bridge here.
+netplugin_fwd_mode: bridge
+
+# Contiv fabric mode aci|default
+contiv_fabric_mode: default
+
+# Encapsulation type vlan|vxlan to use for instantiating container networks
+contiv_encap_mode: vlan
+
+# Backend used by Netplugin for instantiating container networks
+netplugin_driver: ovs
+
+# Create a default Contiv network for use by pods
+contiv_default_network: true
+
+# VLAN/VXLAN tag value to be used for the default network
+contiv_default_network_tag: 1
+
+#SRFIXME (use the openshift variables)
+https_proxy: ""
+http_proxy: ""
+no_proxy: ""
+
+# The following are ACI-specific parameters used when contiv_fabric_mode: aci is set.
+# Otherwise, they can be ignored.
+apic_url: ""
+apic_username: ""
+apic_password: ""
+apic_leaf_nodes: ""
+apic_phys_dom: ""
+apic_contracts_unrestricted_mode: no
+apic_epg_bridge_domain: not_specified
+is_atomic: False
+kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
+master_name: "{{ groups['masters'][0] }}"
+contiv_etcd_port: 22379
+etcd_url: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ contiv_etcd_port }}"
+kube_ca_cert: "{{ kube_cert_dir }}/ca.crt"
+kube_key: "{{ kube_cert_dir }}/admin.key"
+kube_cert: "{{ kube_cert_dir }}/admin.crt"
+kube_master_api_port: 8443
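
Several of the defaults above (netplugin_ctrl_ip, netplugin_vtep_ip, etcd_url) resolve an interface name to its IPv4 address through Ansible's per-interface facts. A quick sketch of that lookup in isolation (the interface name is hypothetical):

```yaml
# Hypothetical ad-hoc play: shows the address netplugin_ctrl_ip would default
# to when netplugin_interface is eth1, using the same hostvars lookup.
- hosts: all
  tasks:
    - name: Show the IPv4 address of eth1 as seen by Ansible facts
      debug:
        msg: "{{ hostvars[inventory_hostname]['ansible_eth1'].ipv4.address }}"
```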
diff --git a/roles/contiv/files/contiv_cni.conf b/roles/contiv/files/contiv_cni.conf
new file mode 100644
index 000000000..441dffd41
--- /dev/null
+++ b/roles/contiv/files/contiv_cni.conf
@@ -0,0 +1,5 @@
+{
+ "cniVersion": "0.1.0",
+ "name": "contiv-net",
+ "type": "contivk8s"
+}
diff --git a/roles/contiv/handlers/main.yml b/roles/contiv/handlers/main.yml
new file mode 100644
index 000000000..0fbe73036
--- /dev/null
+++ b/roles/contiv/handlers/main.yml
@@ -0,0 +1,18 @@
+---
+- name: reload systemd
+ command: systemctl --system daemon-reload
+
+- name: restart netmaster
+ service:
+ name: netmaster
+ state: restarted
+ when: netmaster_started.changed == false
+
+- name: restart netplugin
+ service:
+ name: netplugin
+ state: restarted
+ when: netplugin_started.changed == false
+
+- name: Save iptables rules
+ command: service iptables save
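
The `changed == false` guards keep a handler from restarting a service that the play just started for the first time, since a fresh start already runs with the new configuration. For reference, the netmaster tasks later in this commit notify the handler exactly this way:

```yaml
# From netmaster.yml in this commit: a config change queues the restart handler;
# the handler's guard then skips it if "Start Netmaster" already reported a change.
- name: Netmaster | Copy environment file for netmaster
  template:
    src: netmaster.env.j2
    dest: /etc/default/netmaster
  notify: restart netmaster
```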
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
new file mode 100644
index 000000000..3223afb6e
--- /dev/null
+++ b/roles/contiv/meta/main.yml
@@ -0,0 +1,28 @@
+---
+galaxy_info:
+ author: Cisco
+ description:
+ company: Cisco
+ license:
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: contiv_facts
+- role: etcd
+ etcd_service: contiv-etcd
+ etcd_is_thirdparty: True
+ etcd_peer_port: 22380
+ etcd_client_port: 22379
+ etcd_conf_dir: /etc/contiv-etcd/
+ etcd_data_dir: /var/lib/contiv-etcd/
+ etcd_ca_host: "{{ inventory_hostname }}"
+ etcd_cert_config_dir: /etc/contiv-etcd/
+ etcd_url_scheme: http
+ etcd_peer_url_scheme: http
+ when: contiv_role == "netmaster"
diff --git a/roles/contiv/tasks/aci.yml b/roles/contiv/tasks/aci.yml
new file mode 100644
index 000000000..30d2eb339
--- /dev/null
+++ b/roles/contiv/tasks/aci.yml
@@ -0,0 +1,32 @@
+---
+- name: ACI | Check aci-gw container image
+ command: "docker inspect contiv/aci-gw"
+ register: docker_aci_inspect_result
+ ignore_errors: yes
+
+- name: ACI | Pull aci-gw container
+ command: "docker pull contiv/aci-gw"
+ when: "'No such image' in docker_aci_inspect_result.stderr"
+
+- name: ACI | Copy shell script used by aci-gw service
+ template:
+ src: aci_gw.j2
+ dest: "{{ bin_dir }}/aci_gw.sh"
+ mode: u=rwx,g=rx,o=rx
+
+- name: ACI | Copy systemd units for aci-gw
+ template:
+ src: aci-gw.service
+ dest: /etc/systemd/system/aci-gw.service
+ notify: reload systemd
+
+- name: ACI | Enable aci-gw service
+ service:
+ name: aci-gw
+ enabled: yes
+
+- name: ACI | Start aci-gw service
+ service:
+ name: aci-gw
+ state: started
+ register: aci_gw_started
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
new file mode 100644
index 000000000..9cf98bb80
--- /dev/null
+++ b/roles/contiv/tasks/default_network.yml
@@ -0,0 +1,15 @@
+---
+- name: Contiv | Wait for netmaster
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" tenant ls'
+ register: tenant_result
+ until: tenant_result.stdout.find("default") != -1
+ retries: 9
+ delay: 10
+
+- name: Contiv | Check if default-net exists
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
+ register: net_result
+
+- name: Contiv | Create default-net
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway={{ contiv_default_gw }} default-net'
+ when: net_result.stdout.find("default-net") == -1
diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml
new file mode 100644
index 000000000..319fce46c
--- /dev/null
+++ b/roles/contiv/tasks/download_bins.yml
@@ -0,0 +1,46 @@
+---
+- name: Download Bins | Create directory for current Contiv release
+ file:
+ path: "{{ contiv_current_release_directory }}"
+ state: directory
+
+- name: Install bzip2
+ yum:
+ name: bzip2
+ state: installed
+
+- name: Download Bins | Download Contiv tar file
+ get_url:
+ url: "{{ contiv_download_url }}"
+ dest: "{{ contiv_current_release_directory }}"
+ mode: 0755
+ validate_certs: False
+ environment:
+ http_proxy: "{{ http_proxy|default('') }}"
+ https_proxy: "{{ https_proxy|default('') }}"
+ no_proxy: "{{ no_proxy|default('') }}"
+
+- name: Download Bins | Extract Contiv tar file
+ unarchive:
+ src: "{{ contiv_current_release_directory }}/netplugin-{{ contiv_version }}.tar.bz2"
+ dest: "{{ contiv_current_release_directory }}"
+ copy: no
+
+- name: Download Bins | Download cni tar file
+ get_url:
+ url: "{{ cni_bin_url }}"
+ dest: "{{ cni_download_dir }}"
+ mode: 0755
+ validate_certs: False
+ environment:
+ http_proxy: "{{ http_proxy|default('') }}"
+ https_proxy: "{{ https_proxy|default('') }}"
+ no_proxy: "{{ no_proxy|default('') }}"
+ register: download_file
+
+- name: Download Bins | Extract cni tar file
+ unarchive:
+ src: "{{ download_file.dest }}"
+ dest: "{{ cni_download_dir }}"
+ copy: no
+ when: download_file.changed
diff --git a/roles/contiv/tasks/main.yml b/roles/contiv/tasks/main.yml
new file mode 100644
index 000000000..40a0f9e61
--- /dev/null
+++ b/roles/contiv/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: Ensure bin_dir exists
+ file:
+ path: "{{ bin_dir }}"
+ recurse: yes
+ state: directory
+
+- include: download_bins.yml
+
+- include: netmaster.yml
+ when: contiv_role == "netmaster"
+
+- include: netplugin.yml
+ when: contiv_role == "netplugin"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
new file mode 100644
index 000000000..5057767b8
--- /dev/null
+++ b/roles/contiv/tasks/netmaster.yml
@@ -0,0 +1,65 @@
+---
+- include: netmaster_firewalld.yml
+ when: has_firewalld
+
+- include: netmaster_iptables.yml
+ when: not has_firewalld and has_iptables
+
+- name: Netmaster | Check if /etc/hosts file exists
+ stat:
+ path: /etc/hosts
+ register: hosts
+
+- name: Netmaster | Create hosts file if it is not present
+ file:
+ path: /etc/hosts
+ state: touch
+ when: not hosts.stat.exists
+
+- name: Netmaster | Build hosts file
+ lineinfile:
+ dest: /etc/hosts
+ regexp: .*netmaster$
+ line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster"
+ state: present
+ when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined
+ with_items: "{{ groups['masters'] }}"
+
+- name: Netmaster | Create netmaster symlinks
+ file:
+ src: "{{ contiv_current_release_directory }}/{{ item }}"
+ dest: "{{ bin_dir }}/{{ item }}"
+ state: link
+ with_items:
+ - netmaster
+ - netctl
+
+- name: Netmaster | Copy environment file for netmaster
+ template:
+ src: netmaster.env.j2
+ dest: /etc/default/netmaster
+ mode: 0644
+ notify: restart netmaster
+
+- name: Netmaster | Copy systemd units for netmaster
+ template:
+ src: netmaster.service
+ dest: /etc/systemd/system/netmaster.service
+ notify: reload systemd
+
+- name: Netmaster | Enable Netmaster
+ service:
+ name: netmaster
+ enabled: yes
+
+- name: Netmaster | Start Netmaster
+ service:
+ name: netmaster
+ state: started
+ register: netmaster_started
+
+- include: aci.yml
+ when: contiv_fabric_mode == "aci"
+
+- include: default_network.yml
+ when: contiv_default_network == true
diff --git a/roles/contiv/tasks/netmaster_firewalld.yml b/roles/contiv/tasks/netmaster_firewalld.yml
new file mode 100644
index 000000000..2975351ac
--- /dev/null
+++ b/roles/contiv/tasks/netmaster_firewalld.yml
@@ -0,0 +1,16 @@
+---
+- name: Netmaster Firewalld | Open Netmaster port
+ firewalld:
+ port: "{{ netmaster_port }}/tcp"
+ permanent: false
+ state: enabled
+ # in case this is also a node where firewalld is turned off
+ ignore_errors: yes
+
+- name: Netmaster Firewalld | Save Netmaster port
+ firewalld:
+ port: "{{ netmaster_port }}/tcp"
+ permanent: true
+ state: enabled
+ # in case this is also a node where firewalld is turned off
+ ignore_errors: yes
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
new file mode 100644
index 000000000..2d0fb95ae
--- /dev/null
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -0,0 +1,21 @@
+---
+- name: Netmaster IPtables | Get iptables rules
+ command: iptables -L --wait
+ register: iptablesrules
+ always_run: yes
+
+- name: Netmaster IPtables | Enable iptables at boot
+ service:
+ name: iptables
+ enabled: yes
+ state: started
+
+- name: Netmaster IPtables | Open Netmaster with iptables
+ command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
+ with_items:
+ - "{{ netmaster_port }}"
+ - "{{ contiv_rpc_port1 }}"
+ - "{{ contiv_rpc_port2 }}"
+ - "{{ contiv_rpc_port3 }}"
+ when: iptablesrules.stdout.find("contiv") == -1
+ notify: Save iptables rules
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
new file mode 100644
index 000000000..97b9762df
--- /dev/null
+++ b/roles/contiv/tasks/netplugin.yml
@@ -0,0 +1,122 @@
+---
+- include: netplugin_firewalld.yml
+ when: has_firewalld
+
+- include: netplugin_iptables.yml
+ when: has_iptables
+
+- name: Netplugin | Ensure localhost entry correct in /etc/hosts
+ lineinfile:
+ dest: /etc/hosts
+ regexp: '^127\.0\.0\.1.*'
+ line: '127.0.0.1 localhost {{ ansible_hostname }}'
+ state: present
+
+- name: Netplugin | Remove incorrect localhost entry in /etc/hosts
+ lineinfile:
+ dest: /etc/hosts
+ regexp: '^::1. localhost '
+ line: '::1 '
+ state: absent
+
+- include: ovs.yml
+ when: netplugin_driver == "ovs"
+
+- name: Netplugin | Create Netplugin bin symlink
+ file:
+ src: "{{ contiv_current_release_directory }}/netplugin"
+ dest: "{{ bin_dir }}/netplugin"
+ state: link
+
+
+- name: Netplugin | Ensure cni_bin_dir exists
+ file:
+ path: "{{ cni_bin_dir }}"
+ recurse: yes
+ state: directory
+
+- name: Netplugin | Create CNI bin symlink
+ file:
+ src: "{{ contiv_current_release_directory }}/contivk8s"
+ dest: "{{ cni_bin_dir }}/contivk8s"
+ state: link
+
+- name: Netplugin | Copy CNI loopback bin
+ copy:
+ src: "{{ cni_download_dir }}/loopback"
+ dest: "{{ cni_bin_dir }}/loopback"
+ remote_src: True
+ mode: 0755
+
+- name: Netplugin | Ensure kube_plugin_dir and cni/net.d directories exist
+ file:
+ path: "{{ item }}"
+ recurse: yes
+ state: directory
+ with_items:
+ - "{{ kube_plugin_dir }}"
+ - "/etc/cni/net.d"
+
+- name: Netplugin | Ensure contiv_config_dir exists
+ file:
+ path: "{{ contiv_config_dir }}"
+ recurse: yes
+ state: directory
+
+- name: Netplugin | Copy contiv_cni.conf file
+ copy:
+ src: contiv_cni.conf
+ dest: "{{ item }}"
+ with_items:
+ - "{{ kube_plugin_dir }}/contiv_cni.conf"
+ - "/etc/cni/net.d"
+# notify: restart kubelet
+
+- name: Netplugin | Setup contiv.json config for the cni plugin
+ template:
+ src: contiv.cfg.j2
+ dest: "{{ contiv_config_dir }}/contiv.json"
+ notify: restart netplugin
+
+- name: Netplugin | Copy environment file for netplugin
+ template:
+ src: netplugin.j2
+ dest: /etc/default/netplugin
+ mode: 0644
+ notify: restart netplugin
+
+- name: Docker | Make sure proxy setting exists
+ lineinfile:
+ dest: /etc/sysconfig/docker-network
+ regexp: '^https_proxy.*'
+ line: 'https_proxy={{ https_proxy }}'
+ state: present
+ register: docker_updated
+
+- name: Netplugin | Copy systemd unit for netplugin
+ template:
+ src: netplugin.service
+ dest: /etc/systemd/system/netplugin.service
+ notify: reload systemd
+
+- name: systemd reload
+ command: systemctl daemon-reload
+ when: docker_updated|changed
+
+- name: Docker | Restart docker
+ service:
+ name: docker
+ state: restarted
+ when: docker_updated|changed
+
+- name: Netplugin | Enable Netplugin
+ service:
+ name: netplugin
+ enabled: yes
+
+- name: Netplugin | Start Netplugin
+ service:
+ name: netplugin
+ state: started
+ register: netplugin_started
+# notify: restart kubelet
diff --git a/roles/contiv/tasks/netplugin_firewalld.yml b/roles/contiv/tasks/netplugin_firewalld.yml
new file mode 100644
index 000000000..3aeffae56
--- /dev/null
+++ b/roles/contiv/tasks/netplugin_firewalld.yml
@@ -0,0 +1,34 @@
+---
+- name: Netplugin Firewalld | Open Netplugin port
+ firewalld:
+ port: "{{ netplugin_port }}/tcp"
+ permanent: false
+ state: enabled
+ # in case this is also a node where firewalld is turned off
+ ignore_errors: yes
+
+- name: Netplugin Firewalld | Save Netplugin port
+ firewalld:
+ port: "{{ netplugin_port }}/tcp"
+ permanent: true
+ state: enabled
+ # in case this is also a node where firewalld is turned off
+ ignore_errors: yes
+
+- name: Netplugin Firewalld | Open vxlan port
+ firewalld:
+ port: "8472/udp"
+ permanent: false
+ state: enabled
+ # in case this is also a node where firewalld is turned off
+ ignore_errors: yes
+ when: contiv_encap_mode == "vxlan"
+
+- name: Netplugin Firewalld | Save vxlan port
+ firewalld:
+ port: "8472/udp"
+ permanent: true
+ state: enabled
+ # in case this is also a node where firewalld is turned off
+ ignore_errors: yes
+ when: contiv_encap_mode == "vxlan"
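
Each rule is declared twice so that it applies to both the runtime and the permanent firewalld configuration. On Ansible versions where the firewalld module supports the `immediate` option, one task could cover both; a sketch under that assumption:

```yaml
# Hypothetical consolidation: `immediate: true` applies the permanent rule to the
# running firewalld at the same time, replacing the runtime/permanent task pair.
- name: Netplugin Firewalld | Open and persist Netplugin port
  firewalld:
    port: "{{ netplugin_port }}/tcp"
    permanent: true
    immediate: true
    state: enabled
  # in case this is also a node where firewalld is turned off
  ignore_errors: yes
```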
diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml
new file mode 100644
index 000000000..8c348ac67
--- /dev/null
+++ b/roles/contiv/tasks/netplugin_iptables.yml
@@ -0,0 +1,29 @@
+---
+- name: Netplugin IPtables | Get iptables rules
+ command: iptables -L --wait
+ register: iptablesrules
+ always_run: yes
+
+- name: Netplugin IPtables | Enable iptables at boot
+ service:
+ name: iptables
+ enabled: yes
+ state: started
+
+- name: Netplugin IPtables | Open Netmaster with iptables
+ command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
+ with_items:
+ - "{{ netmaster_port }}"
+ - "{{ contiv_rpc_port1 }}"
+ - "{{ contiv_rpc_port2 }}"
+ - "{{ contiv_rpc_port3 }}"
+ - "{{ contiv_etcd_port }}"
+ - "{{ kube_master_api_port }}"
+ when: iptablesrules.stdout.find("contiv") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Open vxlan port 8472 with iptables
+  command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "vxlan"
+
+- name: Netplugin IPtables | Open vxlan port 4789 with iptables
+  command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "vxlan"
diff --git a/roles/contiv/tasks/ovs.yml b/roles/contiv/tasks/ovs.yml
new file mode 100644
index 000000000..0c1b994c7
--- /dev/null
+++ b/roles/contiv/tasks/ovs.yml
@@ -0,0 +1,28 @@
+---
+- include: packageManagerInstall.yml
+ when: source_type == "packageManager"
+ tags:
+ - binary-update
+
+- name: OVS | Configure selinux for ovs
+ command: "semanage permissive -a openvswitch_t"
+
+- name: OVS | Enable ovs
+ service:
+ name: openvswitch
+ enabled: yes
+
+- name: OVS | Start ovs
+ service:
+ name: openvswitch
+ state: started
+ register: ovs_started
+
+- name: OVS | Configure ovs
+ command: "ovs-vsctl set-manager {{ item }}"
+ with_items:
+ - "tcp:127.0.0.1:6640"
+ - "ptcp:6640"
+
+- name: OVS | Configure ovsdb-server
+ command: "ovs-appctl -t ovsdb-server ovsdb-server/add-remote ptcp:6640"
diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml
new file mode 100644
index 000000000..2eff1b85f
--- /dev/null
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -0,0 +1,12 @@
+---
+- name: Package Manager | Init the did_install fact
+ set_fact:
+ did_install: false
+
+- include: pkgMgrInstallers/centos-install.yml
+ when: ansible_distribution == "CentOS" and not is_atomic
+
+- name: Package Manager | Set fact saying we did CentOS package install
+  set_fact:
+    did_install: true
+  when: ansible_distribution == "CentOS" and not is_atomic
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
new file mode 100644
index 000000000..51c3d35ac
--- /dev/null
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -0,0 +1,33 @@
+---
+- name: PkgMgr CentOS | Install net-tools pkg for route
+  yum:
+    name: net-tools
+    state: latest
+
+- name: PkgMgr CentOS | Get OpenStack Kilo RPM
+ get_url:
+ url: https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-2.noarch.rpm
+ dest: /tmp/rdo-release-kilo-2.noarch.rpm
+ validate_certs: False
+ environment:
+ http_proxy: "{{ http_proxy|default('') }}"
+ https_proxy: "{{ https_proxy|default('') }}"
+ no_proxy: "{{ no_proxy|default('') }}"
+ tags:
+ - ovs_install
+
+- name: PkgMgr CentOS | Install OpenStack Kilo RPM
+  yum:
+    name: /tmp/rdo-release-kilo-2.noarch.rpm
+    state: present
+ tags:
+ - ovs_install
+
+- name: PkgMgr CentOS | Install ovs
+  yum:
+    name: openvswitch
+    state: latest
+ environment:
+ http_proxy: "{{ http_proxy|default('') }}"
+ https_proxy: "{{ https_proxy|default('') }}"
+ no_proxy: "{{ no_proxy|default('') }}"
+ tags:
+ - ovs_install
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
new file mode 100644
index 000000000..8e4b66fbe
--- /dev/null
+++ b/roles/contiv/templates/aci-gw.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Contiv ACI gw
+After=auditd.service systemd-user-sessions.service time-sync.target docker.service
+
+[Service]
+ExecStart={{ bin_dir }}/aci_gw.sh start
+ExecStop={{ bin_dir }}/aci_gw.sh stop
+KillMode=control-group
+Restart=on-failure
+RestartSec=10
diff --git a/roles/contiv/templates/aci_gw.j2 b/roles/contiv/templates/aci_gw.j2
new file mode 100644
index 000000000..ab4ad46a6
--- /dev/null
+++ b/roles/contiv/templates/aci_gw.j2
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+usage="$0 start|stop"
+if [ $# -ne 1 ]; then
+ echo USAGE: $usage
+ exit 1
+fi
+
+case $1 in
+start)
+ set -e
+
+ docker run --net=host \
+ -e "APIC_URL={{ apic_url }}" \
+ -e "APIC_USERNAME={{ apic_username }}" \
+ -e "APIC_PASSWORD={{ apic_password }}" \
+ -e "APIC_LEAF_NODE={{ apic_leaf_nodes }}" \
+ -e "APIC_PHYS_DOMAIN={{ apic_phys_dom }}" \
+ -e "APIC_EPG_BRIDGE_DOMAIN={{ apic_epg_bridge_domain }}" \
+ -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ apic_contracts_unrestricted_mode }}" \
+ --name=contiv-aci-gw \
+ contiv/aci-gw
+ ;;
+
+stop)
+ # don't stop on error
+ docker stop contiv-aci-gw
+ docker rm contiv-aci-gw
+ ;;
+
+*)
+ echo USAGE: $usage
+ exit 1
+ ;;
+esac
diff --git a/roles/contiv/templates/contiv.cfg.j2 b/roles/contiv/templates/contiv.cfg.j2
new file mode 100644
index 000000000..2c9a666a9
--- /dev/null
+++ b/roles/contiv/templates/contiv.cfg.j2
@@ -0,0 +1,6 @@
+{
+ "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}",
+ "K8S_CA": "{{ openshift.common.config_base }}/node/ca.crt",
+ "K8S_KEY": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.key",
+ "K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt"
+}
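
For illustration, with hypothetical values (a master whose netmaster_interface address is 192.168.10.11, the default API port of 8443, a config_base of /etc/origin, and a node named node1.example.com), the template would render roughly as:

```json
{
  "K8S_API_SERVER": "https://192.168.10.11:8443",
  "K8S_CA": "/etc/origin/node/ca.crt",
  "K8S_KEY": "/etc/origin/node/system:node:node1.example.com.key",
  "K8S_CERT": "/etc/origin/node/system:node:node1.example.com.crt"
}
```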
diff --git a/roles/contiv/templates/netmaster.env.j2 b/roles/contiv/templates/netmaster.env.j2
new file mode 100644
index 000000000..5b5c84a2e
--- /dev/null
+++ b/roles/contiv/templates/netmaster.env.j2
@@ -0,0 +1,2 @@
+NETMASTER_ARGS='--cluster-store etcd://{{ etcd_url }} --cluster-mode=kubernetes'
+
diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service
new file mode 100644
index 000000000..21c0380be
--- /dev/null
+++ b/roles/contiv/templates/netmaster.service
@@ -0,0 +1,8 @@
+[Unit]
+Description=Netmaster
+After=auditd.service systemd-user-sessions.service contiv-etcd.service
+
+[Service]
+EnvironmentFile=/etc/default/netmaster
+ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS
+KillMode=control-group
diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2
new file mode 100644
index 000000000..f3d26c037
--- /dev/null
+++ b/roles/contiv/templates/netplugin.j2
@@ -0,0 +1,9 @@
+{% if contiv_encap_mode == "vlan" %}
+NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
+{% endif %}
+{# Note: Commenting out vxlan encap mode support until it is fully supported
+{% if contiv_encap_mode == "vxlan" %}
+NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -e {{contiv_encap_mode}} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
+{% endif %}
+#}
+
diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service
new file mode 100644
index 000000000..dc7b95bb5
--- /dev/null
+++ b/roles/contiv/templates/netplugin.service
@@ -0,0 +1,8 @@
+[Unit]
+Description=Netplugin
+After=auditd.service systemd-user-sessions.service contiv-etcd.service
+
+[Service]
+EnvironmentFile=/etc/default/netplugin
+ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS
+KillMode=control-group
diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml
new file mode 100644
index 000000000..a6c08fa63
--- /dev/null
+++ b/roles/contiv_facts/defaults/main.yaml
@@ -0,0 +1,10 @@
+---
+# The directory where binaries are stored on Ansible
+# managed systems.
+bin_dir: /usr/bin
+
+# The directory used by Ansible to temporarily store
+# files on Ansible managed systems.
+ansible_temp_dir: /tmp/.ansible/files
+
+source_type: packageManager
diff --git a/roles/contiv_facts/handlers/main.yml b/roles/contiv_facts/handlers/main.yml
new file mode 100644
index 000000000..f7799b425
--- /dev/null
+++ b/roles/contiv_facts/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: reload systemd
+ command: systemctl --system daemon-reload
diff --git a/roles/contiv_facts/tasks/fedora-install.yml b/roles/contiv_facts/tasks/fedora-install.yml
new file mode 100644
index 000000000..db56a18c0
--- /dev/null
+++ b/roles/contiv_facts/tasks/fedora-install.yml
@@ -0,0 +1,24 @@
+---
+- name: Install dnf
+ yum:
+ name: dnf
+ state: installed
+
+- name: Update repo cache
+ command: dnf update -y
+ retries: 5
+ delay: 10
+ environment:
+ https_proxy: "{{ https_proxy }}"
+ http_proxy: "{{ http_proxy }}"
+ no_proxy: "{{ no_proxy }}"
+
+- name: Install libselinux-python
+ command: dnf install {{ item }} -y
+ with_items:
+ - python-dnf
+ - libselinux-python
+ environment:
+ https_proxy: "{{ https_proxy }}"
+ http_proxy: "{{ http_proxy }}"
+ no_proxy: "{{ no_proxy }}"
diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml
new file mode 100644
index 000000000..926e0e0be
--- /dev/null
+++ b/roles/contiv_facts/tasks/main.yml
@@ -0,0 +1,88 @@
+---
+- name: Determine if Atomic
+ stat: path=/run/ostree-booted
+ register: s
+ changed_when: false
+ always_run: yes
+
+- name: Init the is_atomic fact
+ set_fact:
+ is_atomic: false
+
+- name: Set the is_atomic fact
+ set_fact:
+ is_atomic: true
+ when: s.stat.exists
+
+- name: Determine if CoreOS
+ raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
+ register: distro
+ always_run: yes
+
+- name: Init the is_coreos fact
+ set_fact:
+ is_coreos: false
+
+- name: Set the is_coreos fact
+ set_fact:
+ is_coreos: true
+ when: "'CoreOS' in distro.stdout"
+
+- name: Set docker config file directory
+ set_fact:
+ docker_config_dir: "/etc/sysconfig"
+
+- name: Override docker config file directory for Debian
+ set_fact:
+ docker_config_dir: "/etc/default"
+ when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
+
+- name: Create config file directory
+ file:
+ path: "{{ docker_config_dir }}"
+ state: directory
+
+- name: Set the bin directory path for CoreOS
+ set_fact:
+ bin_dir: "/opt/bin"
+ when: is_coreos
+
+- name: Create the directory used to store binaries
+ file:
+ path: "{{ bin_dir }}"
+ state: directory
+
+- name: Create Ansible temp directory
+ file:
+ path: "{{ ansible_temp_dir }}"
+ state: directory
+
+- name: Determine if rpm is available
+ stat: path=/usr/bin/rpm
+ register: s
+ changed_when: false
+ always_run: yes
+
+- name: Init the has_rpm fact
+ set_fact:
+ has_rpm: false
+
+- name: Set the has_rpm fact
+ set_fact:
+ has_rpm: true
+ when: s.stat.exists
+
+- name: Init the has_firewalld fact
+ set_fact:
+ has_firewalld: false
+
+- name: Init the has_iptables fact
+ set_fact:
+ has_iptables: false
+
+# collect information about what packages are installed
+- include: rpm.yml
+ when: has_rpm
+
+- include: fedora-install.yml
+ when: not is_atomic and ansible_distribution == "Fedora"
diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml
new file mode 100644
index 000000000..d2f66dac5
--- /dev/null
+++ b/roles/contiv_facts/tasks/rpm.yml
@@ -0,0 +1,24 @@
+---
+- name: RPM | Determine if firewalld installed
+ command: "rpm -q firewalld"
+ register: s
+ changed_when: false
+ failed_when: false
+ always_run: yes
+
+- name: Set the has_firewalld fact
+ set_fact:
+ has_firewalld: true
+ when: s.rc == 0
+
+- name: Determine if iptables-services installed
+ command: "rpm -q iptables-services"
+ register: s
+ changed_when: false
+ failed_when: false
+ always_run: yes
+
+- name: Set the has_iptables fact
+ set_fact:
+ has_iptables: true
+ when: s.rc == 0
diff --git a/roles/docker/templates/custom.conf.j2 b/roles/docker/templates/custom.conf.j2
index 53ed56abc..9b47cb6ab 100644
--- a/roles/docker/templates/custom.conf.j2
+++ b/roles/docker/templates/custom.conf.j2
@@ -1,5 +1,5 @@
# {{ ansible_managed }}
[Unit]
-Requires=iptables.service
+Wants=iptables.service
After=iptables.service
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index e0746d70d..29153f4df 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -14,3 +14,4 @@ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_clien
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_data_dir: /var/lib/etcd/
+etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd/tasks/etcdctl.yml
index bb6fabf64..649ad23c1 100644
--- a/roles/etcd/tasks/etcdctl.yml
+++ b/roles/etcd/tasks/etcdctl.yml
@@ -1,6 +1,6 @@
---
- name: Install etcd for etcdctl
- package: name=etcd state=present
+ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
- name: Configure etcd profile.d alises
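
The version suffix is optional: with etcd_version undefined the package spec stays plain `etcd`, while a pinned version renders as `etcd-<version>`. A sketch of pinning it from the inventory (version number hypothetical):

```yaml
# Hypothetical group_vars entry; the tasks above would then run the equivalent of
#   package: name=etcd-3.1.0 state=present
etcd_version: "3.1.0"
```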
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 5f3ca461e..c09da3b61 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -7,7 +7,7 @@
etcd_ip: "{{ etcd_ip }}"
- name: Install etcd
- package: name=etcd state=present
+ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
- name: Pull etcd container
@@ -26,13 +26,60 @@
- etcd_is_containerized | bool
- not openshift.common.is_etcd_system_container | bool
-- name: Ensure etcd datadir exists when containerized
+
+# Start secondary etcd instance for third party integrations
+# TODO: Determine an alternative to using thirdparty variable
+
+- name: Create configuration directory
+ file:
+ path: "{{ etcd_conf_dir }}"
+ state: directory
+ mode: 0700
+ when: etcd_is_thirdparty | bool
+
+ # TODO: retest with symlink to confirm it does or does not function
+- name: Copy service file for etcd instance
+ copy:
+ src: /usr/lib/systemd/system/etcd.service
+ dest: "/etc/systemd/system/{{ etcd_service }}.service"
+ remote_src: True
+ when: etcd_is_thirdparty | bool
+
+- name: Ensure third party etcd service.d directory exists
+ file:
+ path: "{{ etcd_systemd_dir }}"
+ state: directory
+ when: etcd_is_thirdparty | bool
+
+- name: Configure third party etcd service unit file
+ template:
+ dest: "{{ etcd_systemd_dir }}/custom.conf"
+ src: custom.conf.j2
+ when: etcd_is_thirdparty
+
+ # TODO: this task may not be needed with Validate permissions
+- name: Ensure etcd datadir exists
file:
path: "{{ etcd_data_dir }}"
state: directory
mode: 0700
when: etcd_is_containerized | bool
+- name: Ensure etcd datadir ownership for thirdparty datadir
+ file:
+ path: "{{ etcd_data_dir }}"
+ state: directory
+ mode: 0700
+ owner: etcd
+ group: etcd
+ recurse: True
+ when: etcd_is_thirdparty | bool
+
+ # TODO: Determine if the below reload would work here, for now just reload
+- name: Reload systemd for third party etcd
+ command: systemctl daemon-reload
+ when: etcd_is_thirdparty | bool
+
- name: Disable system etcd when containerized
systemd:
name: etcd
@@ -67,7 +114,7 @@
- name: Write etcd global config file
template:
src: etcd.conf.j2
- dest: /etc/etcd/etcd.conf
+ dest: "{{ etcd_conf_file }}"
backup: true
notify:
- restart etcd
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 241180e2c..3b80164cc 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,17 +1,16 @@
---
+- name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
-- name: Check etcd system container package
- command: >
- atomic containers list --no-trunc -a -f container=etcd
- register: result
-
- name: Set initial Etcd cluster
set_fact:
- etcd_initial_cluster: >
+ etcd_initial_cluster: >-
{% for host in etcd_peers | default([]) -%}
{% if loop.last -%}
{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }}
@@ -20,44 +19,23 @@
{%- endif -%}
{% endfor -%}
-- name: Update Etcd system container package
- command: >
- atomic containers update
- --set ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
- --set ETCD_NAME={{ etcd_hostname }}
- --set ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster | replace('\n', '') }}
- --set ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
- --set ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
- --set ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
- --set ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
- --set ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
- --set ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- --set ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt
- --set ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key
- --set ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- --set ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- --set ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
- etcd
- when:
- - ("etcd" in result.stdout)
-
-- name: Install Etcd system container package
- command: >
- atomic install --system --name=etcd
- --set ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
- --set ETCD_NAME={{ etcd_hostname }}
- --set ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster | replace('\n', '') }}
- --set ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
- --set ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
- --set ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
- --set ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
- --set ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
- --set ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- --set ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt
- --set ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key
- --set ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- --set ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- --set ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
- {{ openshift.etcd.etcd_image }}
- when:
- - ("etcd" not in result.stdout)
+- name: Install or Update Etcd system container package
+ oc_atomic_container:
+ name: etcd
+ image: "{{ openshift.etcd.etcd_image }}"
+ state: latest
+ values:
+ - ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
+ - ETCD_NAME={{ etcd_hostname }}
+ - ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}
+ - ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
+ - ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
+ - ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
+ - ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
+ - ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
+ - ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt
+ - ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key
+ - ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
+ - ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
diff --git a/roles/etcd/templates/custom.conf.j2 b/roles/etcd/templates/custom.conf.j2
new file mode 100644
index 000000000..d3433c658
--- /dev/null
+++ b/roles/etcd/templates/custom.conf.j2
@@ -0,0 +1,3 @@
+[Service]
+WorkingDirectory={{ etcd_data_dir }}
+EnvironmentFile=-{{ etcd_conf_file }}
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 7ccf78212..990a86c21 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -8,7 +8,7 @@
{% endfor -%}
{% endmacro -%}
-{% if etcd_peers | default([]) | length > 1 %}
+{% if (etcd_peers | default([]) | length > 1) or (etcd_is_thirdparty) %}
ETCD_NAME={{ etcd_hostname }}
ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
{% else %}
@@ -23,6 +23,16 @@ ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
#ETCD_MAX_WALS=5
#ETCD_CORS=
+{% if etcd_is_thirdparty %}
+#[cluster]
+ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
+
+# TODO: This needs to be altered to support the correct etcd instances
+ETCD_INITIAL_CLUSTER={{ etcd_hostname }}={{ etcd_initial_advertise_peer_urls }}
+ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
+ETCD_INITIAL_CLUSTER_TOKEN=thirdparty-etcd-cluster-1
+{% endif %}
+
{% if etcd_peers | default([]) | length > 1 %}
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 2eb9af921..c5efb0a0c 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -2,6 +2,7 @@
# etcd server vars
etcd_conf_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container else '/var/lib/etcd/etcd.etcd/etc' }}"
etcd_system_container_conf_dir: /var/lib/etcd/etc
+etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
etcd_key_file: "{{ etcd_conf_dir }}/server.key"
@@ -33,3 +34,4 @@ etcd_hostname: "{{ inventory_hostname }}"
etcd_ip: "{{ ansible_default_ipv4.address }}"
etcd_is_atomic: False
etcd_is_containerized: False
+etcd_is_thirdparty: False
diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml
index b453f2bd8..98c913dba 100644
--- a/roles/etcd_server_certificates/meta/main.yml
+++ b/roles/etcd_server_certificates/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: etcd_ca
+- role: openshift_etcd_ca
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index 242c1e997..4ae9b79c4 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install etcd
- package: name=etcd state=present
+ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
- name: Check status of etcd certificates
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 0b4a019f3..af1d13fe1 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -275,7 +275,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -284,7 +285,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -298,6 +299,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1023,13 +1030,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1051,9 +1058,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1291,8 +1298,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1312,8 +1319,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1359,10 +1366,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
@@ -1492,6 +1500,9 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
+ if api_rval['returncode'] != 0:
+ return {'Failed': True, 'msg': api_rval}
+
return {'changed': True, 'results': api_rval, 'state': state}
########
diff --git a/roles/lib_openshift/library/oadm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index ced04bf3d..0050ccf62 100644
--- a/roles/lib_openshift/library/oadm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -54,7 +54,7 @@ from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
-module: oadm_manage_node
+module: oc_adm_manage_node
short_description: Module to manage openshift nodes
description:
- Manage openshift nodes programmatically.
@@ -126,13 +126,13 @@ extends_documentation_fragment: []
EXAMPLES = '''
- name: oadm manage-node --schedulable=true --selector=ops_node=new
- oadm_manage_node:
+ oc_adm_manage_node:
selector: ops_node=new
schedulable: True
register: schedout
- name: oadm manage-node my-k8s-node-5 --evacuate
- oadm_manage_node:
+ oc_adm_manage_node:
node: my-k8s-node-5
evacuate: True
force: True
@@ -267,7 +267,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -276,7 +277,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -290,6 +291,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1015,13 +1022,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1043,9 +1050,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1283,8 +1290,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1304,8 +1311,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1351,17 +1358,18 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
-# -*- -*- -*- Begin included fragment: class/oadm_manage_node.py -*- -*- -*-
+# -*- -*- -*- Begin included fragment: class/oc_adm_manage_node.py -*- -*- -*-
class ManageNodeException(Exception):
@@ -1570,9 +1578,9 @@ class ManageNode(OpenShiftCLI):
return {'changed': changed, 'results': results, 'state': "present"}
-# -*- -*- -*- End included fragment: class/oadm_manage_node.py -*- -*- -*-
+# -*- -*- -*- End included fragment: class/oc_adm_manage_node.py -*- -*- -*-
-# -*- -*- -*- Begin included fragment: ansible/oadm_manage_node.py -*- -*- -*-
+# -*- -*- -*- Begin included fragment: ansible/oc_adm_manage_node.py -*- -*- -*-
def main():
@@ -1610,4 +1618,4 @@ def main():
if __name__ == "__main__":
main()
-# -*- -*- -*- End included fragment: ansible/oadm_manage_node.py -*- -*- -*-
+# -*- -*- -*- End included fragment: ansible/oc_adm_manage_node.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
new file mode 100644
index 000000000..3d1dc1c96
--- /dev/null
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -0,0 +1,2128 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/policy_group -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_adm_policy_group
+short_description: Module to manage openshift policy for groups
+description:
+ - Manage openshift policy for groups.
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ namespace:
+ description:
+ - The namespace scope
+ required: false
+ default: None
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ group:
+ description:
+ - The name of the group
+ required: true
+ default: None
+ aliases: []
+ resource_kind:
+ description:
+ - The kind of policy to affect
+ required: true
+ default: None
+ choices: ["role", "cluster-role", "scc"]
+ aliases: []
+ resource_name:
+ description:
+ - The name of the policy
+ required: true
+ default: None
+ aliases: []
+ state:
+ description:
+ - Desired state of the policy
+ required: true
+ default: present
+ choices: ["present", "absent"]
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: oc adm policy remove-scc-from-group an-scc agroup
+ oc_adm_policy_group:
+ group: agroup
+ resource_kind: scc
+ resource_name: an-scc
+ state: absent
+
+- name: oc adm policy add-cluster-role-to-group system:build-strategy-docker agroup
+ oc_adm_policy_group:
+ group: agroup
+ resource_kind: cluster-role
+ resource_name: system:build-strategy-docker
+ state: present
+'''
+
+# -*- -*- -*- End included fragment: doc/policy_group -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+    @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
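+
+    # Illustrative behaviour (YAML 1.1 loader semantics assumed):
+    #   Yedit.parse_value('yes', 'bool')  # -> True (string coerced to bool)
+    #   Yedit.parse_value('3', 'str')     # -> '3' (left as a string)
+    #   Yedit.parse_value('3')            # -> 3 (yaml.load infers the type)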
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+            return {'failed': True,
+                    'msg': ('Error opening file [%s]. Verify that the '
+                            'file exists, that it has correct '
+                            'permissions, and is valid yaml.'
+                            % module.params['src'])}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
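+
+# For illustration: on Python 3 shutil.which() does the lookup; on Python 2
+# (no shutil.which, hence the AttributeError) each path is probed manually,
+# so e.g. an oc at /usr/local/bin/oc is found even when sudo strips it from
+# $PATH, and the bare name 'oc' is returned when nothing matches.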
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
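+
+    # Sketch of the command assembled for a parameterised template
+    # (illustrative values):
+    #   self._process('my-template', params={'MEMORY': '1Gi'})
+    #   -> oc process my-template -v MEMORY=1Gi
+    # (plus the usual -n <namespace> suffix added by openshift_cmd)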
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # noqa: E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
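+
+    # Illustrative call (not executed here): for an instance created with
+    # namespace='default', the following builds and runs
+    # `oc get pods -o json -n default` and json-decodes its stdout:
+    #   self.openshift_cmd(['get', 'pods', '-o', 'json'], output=True)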
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+        with open(inc_file) as inc_fd:
+            Utils._write(tmpfile, inc_fd.read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+        # By default "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
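+
+    # e.g. given typical `oc version` output (illustrative):
+    #   oc v3.3.0.33
+    #   kubernetes v1.3.0+52492b4
+    # filter_versions() yields {'oc': 'v3.3.0.33',
+    #                           'kubernetes': 'v1.3.0+52492b4',
+    #                           'openshift': 'v3.3.0.33'}  # backfilled from 'oc'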
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
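+
+    # e.g. add_custom_versions({'oc': 'v3.3.0.33-1'}) returns
+    # {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}; the build suffix after
+    # '-' (and anything after '+') is dropped, and '_short' is a fixed
+    # two-component slice of the version string.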
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
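+
+    # A minimal sketch of the comparison (assumed inputs):
+    #   user = {'spec': {'replicas': 1}}
+    #   api  = {'spec': {'replicas': 1}, 'metadata': {'name': 'foo'}}
+    #   Utils.check_def_equal(user, api)  # -> True ('metadata' is skipped)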
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
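+
+    # e.g. with options {'replica_count': {'value': 3, 'include': True},
+    #                    'dry_run': {'value': None, 'include': True}}
+    # stringify() -> ['--replica-count=3']; falsy values are skipped unless
+    # they are ints (so a value of 0 would still be emitted).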
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/rolebinding.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class RoleBindingConfig(object):
+ ''' Handle rolebinding config '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ name,
+ namespace,
+ kubeconfig,
+ group_names=None,
+ role_ref=None,
+ subjects=None,
+ usernames=None):
+ ''' constructor for handling rolebinding options '''
+ self.kubeconfig = kubeconfig
+ self.name = name
+ self.namespace = namespace
+ self.group_names = group_names
+ self.role_ref = role_ref
+ self.subjects = subjects
+ self.usernames = usernames
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' create a default rolebinding as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'RoleBinding'
+ self.data['groupNames'] = self.group_names
+        self.data['metadata'] = {}
+        self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+
+ self.data['roleRef'] = self.role_ref
+ self.data['subjects'] = self.subjects
+ self.data['userNames'] = self.usernames
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class RoleBinding(Yedit):
+ ''' Class to model a rolebinding openshift object'''
+ group_names_path = "groupNames"
+ role_ref_path = "roleRef"
+ subjects_path = "subjects"
+ user_names_path = "userNames"
+
+ kind = 'RoleBinding'
+
+ def __init__(self, content):
+ '''RoleBinding constructor'''
+ super(RoleBinding, self).__init__(content=content)
+ self._subjects = None
+ self._role_ref = None
+ self._group_names = None
+ self._user_names = None
+
+ @property
+ def subjects(self):
+ ''' subjects property '''
+ if self._subjects is None:
+ self._subjects = self.get_subjects()
+ return self._subjects
+
+ @subjects.setter
+ def subjects(self, data):
+ ''' subjects property setter'''
+ self._subjects = data
+
+ @property
+ def role_ref(self):
+ ''' role_ref property '''
+ if self._role_ref is None:
+ self._role_ref = self.get_role_ref()
+ return self._role_ref
+
+ @role_ref.setter
+ def role_ref(self, data):
+ ''' role_ref property setter'''
+ self._role_ref = data
+
+ @property
+ def group_names(self):
+ ''' group_names property '''
+ if self._group_names is None:
+ self._group_names = self.get_group_names()
+ return self._group_names
+
+ @group_names.setter
+ def group_names(self, data):
+ ''' group_names property setter'''
+ self._group_names = data
+
+ @property
+ def user_names(self):
+ ''' user_names property '''
+ if self._user_names is None:
+ self._user_names = self.get_user_names()
+ return self._user_names
+
+ @user_names.setter
+ def user_names(self, data):
+ ''' user_names property setter'''
+ self._user_names = data
+
+ def get_group_names(self):
+ ''' return groupNames '''
+ return self.get(RoleBinding.group_names_path) or []
+
+ def get_user_names(self):
+ ''' return usernames '''
+ return self.get(RoleBinding.user_names_path) or []
+
+ def get_role_ref(self):
+ ''' return role_ref '''
+ return self.get(RoleBinding.role_ref_path) or {}
+
+ def get_subjects(self):
+ ''' return subjects '''
+ return self.get(RoleBinding.subjects_path) or []
+
+ #### ADD #####
+ def add_subject(self, inc_subject):
+ ''' add a subject '''
+ if self.subjects:
+ # pylint: disable=no-member
+ self.subjects.append(inc_subject)
+ else:
+ self.put(RoleBinding.subjects_path, [inc_subject])
+
+ return True
+
+ def add_role_ref(self, inc_role_ref):
+ ''' add a role_ref '''
+ if not self.role_ref:
+ self.put(RoleBinding.role_ref_path, {"name": inc_role_ref})
+ return True
+
+ return False
+
+ def add_group_names(self, inc_group_names):
+ ''' add a group_names '''
+ if self.group_names:
+ # pylint: disable=no-member
+ self.group_names.append(inc_group_names)
+ else:
+ self.put(RoleBinding.group_names_path, [inc_group_names])
+
+ return True
+
+ def add_user_name(self, inc_user_name):
+ ''' add a username '''
+ if self.user_names:
+ # pylint: disable=no-member
+ self.user_names.append(inc_user_name)
+ else:
+ self.put(RoleBinding.user_names_path, [inc_user_name])
+
+ return True
+
+ #### /ADD #####
+
+    #### REMOVE #####
+ def remove_subject(self, inc_subject):
+ ''' remove a subject '''
+ try:
+ # pylint: disable=no-member
+ self.subjects.remove(inc_subject)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_role_ref(self, inc_role_ref):
+ ''' remove a role_ref '''
+ if self.role_ref and self.role_ref['name'] == inc_role_ref:
+ del self.role_ref['name']
+ return True
+
+ return False
+
+ def remove_group_name(self, inc_group_name):
+ ''' remove a groupname '''
+ try:
+ # pylint: disable=no-member
+ self.group_names.remove(inc_group_name)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_user_name(self, inc_user_name):
+ ''' remove a username '''
+ try:
+ # pylint: disable=no-member
+ self.user_names.remove(inc_user_name)
+ except ValueError as _:
+ return False
+
+ return True
+
+ #### /REMOVE #####
+
+ #### UPDATE #####
+ def update_subject(self, inc_subject):
+ ''' update a subject '''
+ try:
+ # pylint: disable=no-member
+ index = self.subjects.index(inc_subject)
+ except ValueError as _:
+ return self.add_subject(inc_subject)
+
+ self.subjects[index] = inc_subject
+
+ return True
+
+ def update_group_name(self, inc_group_name):
+ ''' update a groupname '''
+ try:
+ # pylint: disable=no-member
+ index = self.group_names.index(inc_group_name)
+ except ValueError as _:
+ return self.add_group_names(inc_group_name)
+
+ self.group_names[index] = inc_group_name
+
+ return True
+
+ def update_user_name(self, inc_user_name):
+ ''' update a username '''
+ try:
+ # pylint: disable=no-member
+ index = self.user_names.index(inc_user_name)
+ except ValueError as _:
+ return self.add_user_name(inc_user_name)
+
+ self.user_names[index] = inc_user_name
+
+ return True
+
+ def update_role_ref(self, inc_role_ref):
+ ''' update a role_ref '''
+ self.role_ref['name'] = inc_role_ref
+
+ return True
+
+ #### /UPDATE #####
+
+ #### FIND ####
+ def find_subject(self, inc_subject):
+ ''' find a subject '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.subjects.index(inc_subject)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_group_name(self, inc_group_name):
+ ''' find a group_name '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.group_names.index(inc_group_name)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_user_name(self, inc_user_name):
+ ''' find a user_name '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.user_names.index(inc_user_name)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_role_ref(self, inc_role_ref):
+        ''' find a role_ref '''
+ if self.role_ref and self.role_ref['name'] == inc_role_ref['name']:
+ return self.role_ref
+
+ return None
+
+# -*- -*- -*- End included fragment: lib/rolebinding.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/scc.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class SecurityContextConstraintsConfig(object):
+ ''' Handle scc options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ kubeconfig,
+ options=None,
+ fs_group='MustRunAs',
+ default_add_capabilities=None,
+ groups=None,
+ priority=None,
+ required_drop_capabilities=None,
+ run_as_user='MustRunAsRange',
+ se_linux_context='MustRunAs',
+ supplemental_groups='RunAsAny',
+ users=None,
+ annotations=None):
+ ''' constructor for handling scc options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.options = options
+ self.fs_group = fs_group
+ self.default_add_capabilities = default_add_capabilities
+ self.groups = groups
+ self.priority = priority
+ self.required_drop_capabilities = required_drop_capabilities
+ self.run_as_user = run_as_user
+ self.se_linux_context = se_linux_context
+ self.supplemental_groups = supplemental_groups
+ self.users = users
+ self.annotations = annotations
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' assign the correct properties for a scc dict '''
+ # allow options
+ if self.options:
+ for key, value in self.options.items():
+ self.data[key] = value
+ else:
+ self.data['allowHostDirVolumePlugin'] = False
+ self.data['allowHostIPC'] = False
+ self.data['allowHostNetwork'] = False
+ self.data['allowHostPID'] = False
+ self.data['allowHostPorts'] = False
+ self.data['allowPrivilegedContainer'] = False
+ self.data['allowedCapabilities'] = None
+
+ # version
+ self.data['apiVersion'] = 'v1'
+ # kind
+ self.data['kind'] = 'SecurityContextConstraints'
+ # defaultAddCapabilities
+ self.data['defaultAddCapabilities'] = self.default_add_capabilities
+ # fsGroup
+        self.data['fsGroup'] = {'type': self.fs_group}
+ # groups
+ self.data['groups'] = []
+ if self.groups:
+ self.data['groups'] = self.groups
+ # metadata
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ if self.annotations:
+ for key, value in self.annotations.items():
+ self.data['metadata'][key] = value
+ # priority
+ self.data['priority'] = self.priority
+ # requiredDropCapabilities
+ self.data['requiredDropCapabilities'] = self.required_drop_capabilities
+ # runAsUser
+ self.data['runAsUser'] = {'type': self.run_as_user}
+ # seLinuxContext
+ self.data['seLinuxContext'] = {'type': self.se_linux_context}
+ # supplementalGroups
+ self.data['supplementalGroups'] = {'type': self.supplemental_groups}
+ # users
+ self.data['users'] = []
+ if self.users:
+ self.data['users'] = self.users
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods,no-member
+class SecurityContextConstraints(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ default_add_capabilities_path = "defaultAddCapabilities"
+ fs_group_path = "fsGroup"
+ groups_path = "groups"
+ priority_path = "priority"
+ required_drop_capabilities_path = "requiredDropCapabilities"
+ run_as_user_path = "runAsUser"
+ se_linux_context_path = "seLinuxContext"
+ supplemental_groups_path = "supplementalGroups"
+ users_path = "users"
+ kind = 'SecurityContextConstraints'
+
+ def __init__(self, content):
+ '''SecurityContextConstraints constructor'''
+ super(SecurityContextConstraints, self).__init__(content=content)
+ self._users = None
+ self._groups = None
+
+ @property
+ def users(self):
+ ''' users property getter '''
+ if self._users is None:
+ self._users = self.get_users()
+ return self._users
+
+ @property
+ def groups(self):
+ ''' groups property getter '''
+ if self._groups is None:
+ self._groups = self.get_groups()
+ return self._groups
+
+ @users.setter
+ def users(self, data):
+ ''' users property setter'''
+ self._users = data
+
+ @groups.setter
+ def groups(self, data):
+ ''' groups property setter'''
+ self._groups = data
+
+ def get_users(self):
+ '''get scc users'''
+ return self.get(SecurityContextConstraints.users_path) or []
+
+ def get_groups(self):
+ '''get scc groups'''
+ return self.get(SecurityContextConstraints.groups_path) or []
+
+ def add_user(self, inc_user):
+ ''' add a user '''
+ if self.users:
+ self.users.append(inc_user)
+ else:
+ self.put(SecurityContextConstraints.users_path, [inc_user])
+
+ return True
+
+ def add_group(self, inc_group):
+ ''' add a group '''
+ if self.groups:
+ self.groups.append(inc_group)
+ else:
+ self.put(SecurityContextConstraints.groups_path, [inc_group])
+
+ return True
+
+ def remove_user(self, inc_user):
+ ''' remove a user '''
+ try:
+ self.users.remove(inc_user)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_group(self, inc_group):
+ ''' remove a group '''
+ try:
+ self.groups.remove(inc_group)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def update_user(self, inc_user):
+ ''' update a user '''
+ try:
+ index = self.users.index(inc_user)
+ except ValueError as _:
+ return self.add_user(inc_user)
+
+ self.users[index] = inc_user
+
+ return True
+
+ def update_group(self, inc_group):
+ ''' update a group '''
+ try:
+ index = self.groups.index(inc_group)
+ except ValueError as _:
+ return self.add_group(inc_group)
+
+ self.groups[index] = inc_group
+
+ return True
+
+ def find_user(self, inc_user):
+ ''' find a user '''
+ index = None
+ try:
+ index = self.users.index(inc_user)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_group(self, inc_group):
+ ''' find a group '''
+ index = None
+ try:
+ index = self.groups.index(inc_group)
+ except ValueError as _:
+ return index
+
+ return index
+
+# -*- -*- -*- End included fragment: lib/scc.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_adm_policy_group.py -*- -*- -*-
+
+
+class PolicyGroupException(Exception):
+ ''' PolicyGroup exception'''
+ pass
+
+
+class PolicyGroupConfig(OpenShiftCLIConfig):
+ ''' PolicyGroupConfig is a DTO for group related policy. '''
+ def __init__(self, namespace, kubeconfig, policy_options):
+ super(PolicyGroupConfig, self).__init__(policy_options['name']['value'],
+ namespace, kubeconfig, policy_options)
+ self.kind = self.get_kind()
+ self.namespace = namespace
+
+ def get_kind(self):
+ ''' return the kind we are working with '''
+ if self.config_options['resource_kind']['value'] == 'role':
+ return 'rolebinding'
+ elif self.config_options['resource_kind']['value'] == 'cluster-role':
+ return 'clusterrolebinding'
+ elif self.config_options['resource_kind']['value'] == 'scc':
+ return 'scc'
+
+ return None
+
+
+# pylint: disable=too-many-return-statements
+class PolicyGroup(OpenShiftCLI):
+ ''' Class to handle attaching policies to users '''
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for PolicyGroup '''
+ super(PolicyGroup, self).__init__(config.namespace, config.kubeconfig, verbose)
+ self.config = config
+ self.verbose = verbose
+ self._rolebinding = None
+ self._scc = None
+ self._cluster_policy_bindings = None
+ self._policy_bindings = None
+
+ @property
+ def policybindings(self):
+ if self._policy_bindings is None:
+            results = self._get('policybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve policybindings')
+ self._policy_bindings = results['results'][0]['items'][0]
+
+ return self._policy_bindings
+
+ @property
+ def clusterpolicybindings(self):
+ if self._cluster_policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
+ self._cluster_policy_bindings = results['results'][0]['items'][0]
+
+ return self._cluster_policy_bindings
+
+ @property
+ def role_binding(self):
+ ''' role_binding getter '''
+ return self._rolebinding
+
+ @role_binding.setter
+ def role_binding(self, binding):
+ ''' role_binding setter '''
+ self._rolebinding = binding
+
+ @property
+ def security_context_constraint(self):
+ ''' security_context_constraint getter '''
+ return self._scc
+
+ @security_context_constraint.setter
+ def security_context_constraint(self, scc):
+ ''' security_context_constraint setter '''
+ self._scc = scc
+
+ def get(self):
+ '''fetch the desired kind'''
+ resource_name = self.config.config_options['name']['value']
+ if resource_name == 'cluster-reader':
+ resource_name += 's'
+
+ # oc adm policy add-... creates policy bindings with the name
+ # "[resource_name]-binding", however some bindings in the system
+ # simply use "[resource_name]". So try both.
+
+ results = self._get(self.config.kind, resource_name)
+ if results['returncode'] == 0:
+ return results
+
+ # Now try -binding naming convention
+ return self._get(self.config.kind, resource_name + "-binding")
+
+ def exists_role_binding(self):
+ ''' return whether role_binding exists '''
+ bindings = None
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ bindings = self.clusterpolicybindings
+ else:
+ bindings = self.policybindings
+
+ if bindings is None:
+ return False
+
+ for binding in bindings['roleBindings']:
+ _rb = binding['roleBinding']
+ if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ _rb['groupNames'] is not None and \
+ self.config.config_options['group']['value'] in _rb['groupNames']:
+ self.role_binding = binding
+ return True
+
+ return False
+
+ def exists_scc(self):
+ ''' return whether scc exists '''
+ results = self.get()
+ if results['returncode'] == 0:
+ self.security_context_constraint = SecurityContextConstraints(results['results'][0])
+
+            if self.security_context_constraint.find_group(self.config.config_options['group']['value']) is not None:
+ return True
+
+ return False
+
+ return results
+
+ def exists(self):
+ '''does the object exist?'''
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ return self.exists_role_binding()
+
+ elif self.config.config_options['resource_kind']['value'] == 'role':
+ return self.exists_role_binding()
+
+ elif self.config.config_options['resource_kind']['value'] == 'scc':
+ return self.exists_scc()
+
+ return False
+
+ def perform(self):
+ '''perform action on resource'''
+ cmd = ['policy',
+ self.config.config_options['action']['value'],
+ self.config.config_options['name']['value'],
+ self.config.config_options['group']['value']]
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ state = params['state']
+
+ action = None
+ if state == 'present':
+ action = 'add-' + params['resource_kind'] + '-to-group'
+ else:
+ action = 'remove-' + params['resource_kind'] + '-from-group'
+
+ nconfig = PolicyGroupConfig(params['namespace'],
+ params['kubeconfig'],
+ {'action': {'value': action, 'include': False},
+ 'group': {'value': params['group'], 'include': False},
+ 'resource_kind': {'value': params['resource_kind'], 'include': False},
+ 'name': {'value': params['resource_name'], 'include': False},
+ })
+
+ policygroup = PolicyGroup(nconfig, params['debug'])
+
+ # Run the oc adm policy group related command
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not policygroup.exists():
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: would have performed a delete.'}
+
+ api_rval = policygroup.perform()
+
+ if api_rval['returncode'] != 0:
+ return {'msg': api_rval}
+
+            return {'changed': True, 'results': api_rval, state: 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ results = policygroup.exists()
+ if isinstance(results, dict) and 'returncode' in results and results['returncode'] != 0:
+ return {'msg': results}
+
+ if not results:
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: would have performed a create.'}
+
+ api_rval = policygroup.perform()
+
+ if api_rval['returncode'] != 0:
+ return {'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, state: 'present'}
+
+ return {'changed': False, state: 'present'}
+
+ return {'failed': True, 'changed': False, 'results': 'Unknown state passed. %s' % state, state: 'unknown'}
+
+# -*- -*- -*- End included fragment: class/oc_adm_policy_group.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_adm_policy_group.py -*- -*- -*-
+
+
+def main():
+ '''
+ ansible oc adm module for group policy
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent']),
+ debug=dict(default=False, type='bool'),
+ resource_name=dict(required=True, type='str'),
+ namespace=dict(default='default', type='str'),
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+
+ group=dict(required=True, type='str'),
+ resource_kind=dict(required=True, choices=['role', 'cluster-role', 'scc'], type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = PolicyGroup.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_adm_policy_group.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
new file mode 100644
index 000000000..83f2165a3
--- /dev/null
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -0,0 +1,2122 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/policy_user -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_adm_policy_user
+short_description: Module to manage openshift policy for users
+description:
+ - Manage openshift policy for users.
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ namespace:
+ description:
+ - The namespace scope
+ required: false
+ default: None
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ user:
+ description:
+ - The name of the user
+ required: true
+ default: None
+ aliases: []
+ resource_kind:
+ description:
+ - The kind of policy to affect
+ required: true
+ default: None
+ choices: ["role", "cluster-role", "scc"]
+ aliases: []
+ resource_name:
+ description:
+ - The name of the policy
+ required: true
+ default: None
+ aliases: []
+ state:
+ description:
+ - Desired state of the policy
+ required: true
+ default: present
+ choices: ["present", "absent"]
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: oc adm policy remove-scc-from-user an-scc ausername
+ oc_adm_policy_user:
+ user: ausername
+ resource_kind: scc
+ resource_name: an-scc
+ state: absent
+
+- name: oc adm policy add-cluster-role-to-user system:build-strategy-docker ausername
+ oc_adm_policy_user:
+ user: ausername
+ resource_kind: cluster-role
+ resource_name: system:build-strategy-docker
+ state: present
+'''
+
+# -*- -*- -*- End included fragment: doc/policy_user -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, inc_sep):
+        ''' setter method for separator '''
+        self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
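+
+    # e.g. Yedit.parse_key('a.b[0].c') returns the (index, key) pairs
+    #   [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]
+    # (illustrative; '[n]' matches the first group, bare keys the second)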
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a.b
+            item = 'd'
+        '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
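+
+ # NOTE: load() prefers in-memory content over the file on disk, and uses
+ # ruamel.yaml's RoundTripLoader when available so comments and formatting
+ # survive a read/modify/write cycle; under plain PyYAML it falls back to
+ # safe_load.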
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' update the value at path; lists may be updated by index or by matching curr_value '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
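+
+ # Illustrative behavior: update(path, value, index=0) replaces the first list
+ # entry, update(path, value, curr_value=old) replaces the entry equal to old,
+ # and with neither given the value is appended if not already present.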
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy does not preserve ruamel.yaml formatting metadata, so round-trip
+ # through the dumper when available and fall back to deepcopy under PyYAML
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy does not preserve ruamel.yaml formatting metadata, so round-trip
+ # through the dumper when available and fall back to deepcopy under PyYAML
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
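+
+ # Illustrative example: get_curr_value('{"a": 1}', 'json') returns {'a': 1};
+ # a None invalue passes through unchanged.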
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # The value came in as a string and the caller asked for a bool;
+ # only the spellings listed above are accepted
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
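+
+ # Illustrative examples: parse_value('yes', 'bool') yields True via YAML's
+ # boolean parsing, parse_value(True, 'str') yields 'True', and a string with
+ # vtype 'str' is returned untouched.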
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': ('Error opening file [{}]. Verify that the '
+ 'file exists, that it has correct '
+ 'permissions, and is valid yaml.').format(module.params['src'])}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
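+ # (shutil.which exists only on Python 3; under Python 2 the AttributeError
+ # fallback below performs a manual search of each path entry)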
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
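+
+ # A successful call returns a dict shaped roughly like:
+ # {'returncode': 0, 'results': <parsed JSON or raw stdout>, 'cmd': 'oc get ...'}
+ # while failures add 'stderr'/'stdout' and reset 'results' to {}.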
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ with open(inc_file) as ifd:
+ Utils._write(tmpfile, ifd.read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
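+
+ # delete=False keeps the file around after the context manager closes it;
+ # callers pair this with atexit.register(Utils.cleanup, ...) for removal.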
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # Horrible hack to get the openshift version in OpenShift 3.2:
+ # by default "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
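+
+ # Illustrative input/output: given "oc v3.5.0.17\nkubernetes v1.5.2+43a9be4",
+ # filter_versions returns {'oc': 'v3.5.0.17', 'kubernetes': 'v1.5.2+43a9be4',
+ # 'openshift': 'v3.5.0.17'} (openshift backfilled from oc).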
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
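+
+ # Illustrative example: {'oc': 'v3.5.0.17-1-gfab123'} becomes
+ # {'oc_numeric': '3.5.0.17', 'oc_short': '3.5'} after stripping the '-' suffix.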
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
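+
+ # Illustrative example: {'images': {'value': 'registry:v3.5', 'include': True}}
+ # stringifies to ['--images=registry:v3.5']; options with include=False or a
+ # falsy non-integer value are skipped.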
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/rolebinding.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class RoleBindingConfig(object):
+ ''' Handle rolebinding config '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ name,
+ namespace,
+ kubeconfig,
+ group_names=None,
+ role_ref=None,
+ subjects=None,
+ usernames=None):
+ ''' constructor for handling rolebinding options '''
+ self.kubeconfig = kubeconfig
+ self.name = name
+ self.namespace = namespace
+ self.group_names = group_names
+ self.role_ref = role_ref
+ self.subjects = subjects
+ self.usernames = usernames
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' create a default rolebinding as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'RoleBinding'
+ self.data['groupNames'] = self.group_names
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+
+ self.data['roleRef'] = self.role_ref
+ self.data['subjects'] = self.subjects
+ self.data['userNames'] = self.usernames
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class RoleBinding(Yedit):
+ ''' Class to model a rolebinding openshift object'''
+ group_names_path = "groupNames"
+ role_ref_path = "roleRef"
+ subjects_path = "subjects"
+ user_names_path = "userNames"
+
+ kind = 'RoleBinding'
+
+ def __init__(self, content):
+ '''RoleBinding constructor'''
+ super(RoleBinding, self).__init__(content=content)
+ self._subjects = None
+ self._role_ref = None
+ self._group_names = None
+ self._user_names = None
+
+ @property
+ def subjects(self):
+ ''' subjects property '''
+ if self._subjects is None:
+ self._subjects = self.get_subjects()
+ return self._subjects
+
+ @subjects.setter
+ def subjects(self, data):
+ ''' subjects property setter'''
+ self._subjects = data
+
+ @property
+ def role_ref(self):
+ ''' role_ref property '''
+ if self._role_ref is None:
+ self._role_ref = self.get_role_ref()
+ return self._role_ref
+
+ @role_ref.setter
+ def role_ref(self, data):
+ ''' role_ref property setter'''
+ self._role_ref = data
+
+ @property
+ def group_names(self):
+ ''' group_names property '''
+ if self._group_names is None:
+ self._group_names = self.get_group_names()
+ return self._group_names
+
+ @group_names.setter
+ def group_names(self, data):
+ ''' group_names property setter'''
+ self._group_names = data
+
+ @property
+ def user_names(self):
+ ''' user_names property '''
+ if self._user_names is None:
+ self._user_names = self.get_user_names()
+ return self._user_names
+
+ @user_names.setter
+ def user_names(self, data):
+ ''' user_names property setter'''
+ self._user_names = data
+
+ def get_group_names(self):
+ ''' return groupNames '''
+ return self.get(RoleBinding.group_names_path) or []
+
+ def get_user_names(self):
+ ''' return usernames '''
+ return self.get(RoleBinding.user_names_path) or []
+
+ def get_role_ref(self):
+ ''' return role_ref '''
+ return self.get(RoleBinding.role_ref_path) or {}
+
+ def get_subjects(self):
+ ''' return subjects '''
+ return self.get(RoleBinding.subjects_path) or []
+
+ #### ADD #####
+ def add_subject(self, inc_subject):
+ ''' add a subject '''
+ if self.subjects:
+ # pylint: disable=no-member
+ self.subjects.append(inc_subject)
+ else:
+ self.put(RoleBinding.subjects_path, [inc_subject])
+
+ return True
+
+ def add_role_ref(self, inc_role_ref):
+ ''' add a role_ref '''
+ if not self.role_ref:
+ self.put(RoleBinding.role_ref_path, {"name": inc_role_ref})
+ return True
+
+ return False
+
+ def add_group_names(self, inc_group_names):
+ ''' add a group_names '''
+ if self.group_names:
+ # pylint: disable=no-member
+ self.group_names.append(inc_group_names)
+ else:
+ self.put(RoleBinding.group_names_path, [inc_group_names])
+
+ return True
+
+ def add_user_name(self, inc_user_name):
+ ''' add a username '''
+ if self.user_names:
+ # pylint: disable=no-member
+ self.user_names.append(inc_user_name)
+ else:
+ self.put(RoleBinding.user_names_path, [inc_user_name])
+
+ return True
+
+ #### /ADD #####
+
+ #### REMOVE #####
+ def remove_subject(self, inc_subject):
+ ''' remove a subject '''
+ try:
+ # pylint: disable=no-member
+ self.subjects.remove(inc_subject)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_role_ref(self, inc_role_ref):
+ ''' remove a role_ref '''
+ if self.role_ref and self.role_ref['name'] == inc_role_ref:
+ del self.role_ref['name']
+ return True
+
+ return False
+
+ def remove_group_name(self, inc_group_name):
+ ''' remove a groupname '''
+ try:
+ # pylint: disable=no-member
+ self.group_names.remove(inc_group_name)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_user_name(self, inc_user_name):
+ ''' remove a username '''
+ try:
+ # pylint: disable=no-member
+ self.user_names.remove(inc_user_name)
+ except ValueError as _:
+ return False
+
+ return True
+
+ #### /REMOVE #####
+
+ #### UPDATE #####
+ def update_subject(self, inc_subject):
+ ''' update a subject '''
+ try:
+ # pylint: disable=no-member
+ index = self.subjects.index(inc_subject)
+ except ValueError as _:
+ return self.add_subject(inc_subject)
+
+ self.subjects[index] = inc_subject
+
+ return True
+
+ def update_group_name(self, inc_group_name):
+ ''' update a groupname '''
+ try:
+ # pylint: disable=no-member
+ index = self.group_names.index(inc_group_name)
+ except ValueError as _:
+ return self.add_group_names(inc_group_name)
+
+ self.group_names[index] = inc_group_name
+
+ return True
+
+ def update_user_name(self, inc_user_name):
+ ''' update a username '''
+ try:
+ # pylint: disable=no-member
+ index = self.user_names.index(inc_user_name)
+ except ValueError as _:
+ return self.add_user_name(inc_user_name)
+
+ self.user_names[index] = inc_user_name
+
+ return True
+
+ def update_role_ref(self, inc_role_ref):
+ ''' update a role_ref '''
+ self.role_ref['name'] = inc_role_ref
+
+ return True
+
+ #### /UPDATE #####
+
+ #### FIND ####
+ def find_subject(self, inc_subject):
+ ''' find a subject '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.subjects.index(inc_subject)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_group_name(self, inc_group_name):
+ ''' find a group_name '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.group_names.index(inc_group_name)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_user_name(self, inc_user_name):
+ ''' find a user_name '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.user_names.index(inc_user_name)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_role_ref(self, inc_role_ref):
+ ''' find a role_ref '''
+ if self.role_ref and self.role_ref['name'] == inc_role_ref['name']:
+ return self.role_ref
+
+ return None
+
+# -*- -*- -*- End included fragment: lib/rolebinding.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/scc.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class SecurityContextConstraintsConfig(object):
+ ''' Handle scc options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ kubeconfig,
+ options=None,
+ fs_group='MustRunAs',
+ default_add_capabilities=None,
+ groups=None,
+ priority=None,
+ required_drop_capabilities=None,
+ run_as_user='MustRunAsRange',
+ se_linux_context='MustRunAs',
+ supplemental_groups='RunAsAny',
+ users=None,
+ annotations=None):
+ ''' constructor for handling scc options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.options = options
+ self.fs_group = fs_group
+ self.default_add_capabilities = default_add_capabilities
+ self.groups = groups
+ self.priority = priority
+ self.required_drop_capabilities = required_drop_capabilities
+ self.run_as_user = run_as_user
+ self.se_linux_context = se_linux_context
+ self.supplemental_groups = supplemental_groups
+ self.users = users
+ self.annotations = annotations
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' assign the correct properties for a scc dict '''
+ # allow options
+ if self.options:
+ for key, value in self.options.items():
+ self.data[key] = value
+ else:
+ self.data['allowHostDirVolumePlugin'] = False
+ self.data['allowHostIPC'] = False
+ self.data['allowHostNetwork'] = False
+ self.data['allowHostPID'] = False
+ self.data['allowHostPorts'] = False
+ self.data['allowPrivilegedContainer'] = False
+ self.data['allowedCapabilities'] = None
+
+ # version
+ self.data['apiVersion'] = 'v1'
+ # kind
+ self.data['kind'] = 'SecurityContextConstraints'
+ # defaultAddCapabilities
+ self.data['defaultAddCapabilities'] = self.default_add_capabilities
+ # fsGroup
+ self.data['fsGroup'] = {'type': self.fs_group}
+ # groups
+ self.data['groups'] = []
+ if self.groups:
+ self.data['groups'] = self.groups
+ # metadata
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ if self.annotations:
+ for key, value in self.annotations.items():
+ self.data['metadata'][key] = value
+ # priority
+ self.data['priority'] = self.priority
+ # requiredDropCapabilities
+ self.data['requiredDropCapabilities'] = self.required_drop_capabilities
+ # runAsUser
+ self.data['runAsUser'] = {'type': self.run_as_user}
+ # seLinuxContext
+ self.data['seLinuxContext'] = {'type': self.se_linux_context}
+ # supplementalGroups
+ self.data['supplementalGroups'] = {'type': self.supplemental_groups}
+ # users
+ self.data['users'] = []
+ if self.users:
+ self.data['users'] = self.users
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods,no-member
+class SecurityContextConstraints(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ default_add_capabilities_path = "defaultAddCapabilities"
+ fs_group_path = "fsGroup"
+ groups_path = "groups"
+ priority_path = "priority"
+ required_drop_capabilities_path = "requiredDropCapabilities"
+ run_as_user_path = "runAsUser"
+ se_linux_context_path = "seLinuxContext"
+ supplemental_groups_path = "supplementalGroups"
+ users_path = "users"
+ kind = 'SecurityContextConstraints'
+
+ def __init__(self, content):
+ '''SecurityContextConstraints constructor'''
+ super(SecurityContextConstraints, self).__init__(content=content)
+ self._users = None
+ self._groups = None
+
+ @property
+ def users(self):
+ ''' users property getter '''
+ if self._users is None:
+ self._users = self.get_users()
+ return self._users
+
+ @property
+ def groups(self):
+ ''' groups property getter '''
+ if self._groups is None:
+ self._groups = self.get_groups()
+ return self._groups
+
+ @users.setter
+ def users(self, data):
+ ''' users property setter'''
+ self._users = data
+
+ @groups.setter
+ def groups(self, data):
+ ''' groups property setter'''
+ self._groups = data
+
+ def get_users(self):
+ '''get scc users'''
+ return self.get(SecurityContextConstraints.users_path) or []
+
+ def get_groups(self):
+ '''get scc groups'''
+ return self.get(SecurityContextConstraints.groups_path) or []
+
+ def add_user(self, inc_user):
+ ''' add a user '''
+ if self.users:
+ self.users.append(inc_user)
+ else:
+ self.put(SecurityContextConstraints.users_path, [inc_user])
+
+ return True
+
+ def add_group(self, inc_group):
+ ''' add a group '''
+ if self.groups:
+ self.groups.append(inc_group)
+ else:
+ self.put(SecurityContextConstraints.groups_path, [inc_group])
+
+ return True
+
+ def remove_user(self, inc_user):
+ ''' remove a user '''
+ try:
+ self.users.remove(inc_user)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_group(self, inc_group):
+ ''' remove a group '''
+ try:
+ self.groups.remove(inc_group)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def update_user(self, inc_user):
+ ''' update a user '''
+ try:
+ index = self.users.index(inc_user)
+ except ValueError as _:
+ return self.add_user(inc_user)
+
+ self.users[index] = inc_user
+
+ return True
+
+ def update_group(self, inc_group):
+ ''' update a group '''
+ try:
+ index = self.groups.index(inc_group)
+ except ValueError as _:
+ return self.add_group(inc_group)
+
+ self.groups[index] = inc_group
+
+ return True
+
+ def find_user(self, inc_user):
+ ''' find a user '''
+ index = None
+ try:
+ index = self.users.index(inc_user)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_group(self, inc_group):
+ ''' find a group '''
+ index = None
+ try:
+ index = self.groups.index(inc_group)
+ except ValueError as _:
+ return index
+
+ return index
+
+# -*- -*- -*- End included fragment: lib/scc.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_adm_policy_user.py -*- -*- -*-
+
+
+class PolicyUserException(Exception):
+ ''' PolicyUser exception'''
+ pass
+
+
+class PolicyUserConfig(OpenShiftCLIConfig):
+ ''' PolicyUserConfig is a DTO for user-related policy. '''
+ def __init__(self, namespace, kubeconfig, policy_options):
+ super(PolicyUserConfig, self).__init__(policy_options['name']['value'],
+ namespace, kubeconfig, policy_options)
+ self.kind = self.get_kind()
+ self.namespace = namespace
+
+ def get_kind(self):
+ ''' return the kind we are working with '''
+ if self.config_options['resource_kind']['value'] == 'role':
+ return 'rolebinding'
+ elif self.config_options['resource_kind']['value'] == 'cluster-role':
+ return 'clusterrolebinding'
+ elif self.config_options['resource_kind']['value'] == 'scc':
+ return 'scc'
+
+ return None
+
+
+# pylint: disable=too-many-return-statements
+class PolicyUser(OpenShiftCLI):
+ ''' Class to handle attaching policies to users '''
+
+ def __init__(self,
+ policy_config,
+ verbose=False):
+ ''' Constructor for PolicyUser '''
+ super(PolicyUser, self).__init__(policy_config.namespace, policy_config.kubeconfig, verbose)
+ self.config = policy_config
+ self.verbose = verbose
+ self._rolebinding = None
+ self._scc = None
+ self._cluster_policy_bindings = None
+ self._policy_bindings = None
+
+ @property
+ def policybindings(self):
+ if self._policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve policybindings')
+ self._policy_bindings = results['results'][0]['items'][0]
+
+ return self._policy_bindings
+
+ @property
+ def clusterpolicybindings(self):
+ if self._cluster_policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
+ self._cluster_policy_bindings = results['results'][0]['items'][0]
+
+ return self._cluster_policy_bindings
+
+ @property
+ def role_binding(self):
+ ''' role_binding property '''
+ return self._rolebinding
+
+ @role_binding.setter
+ def role_binding(self, binding):
+ ''' setter for role_binding property '''
+ self._rolebinding = binding
+
+ @property
+ def security_context_constraint(self):
+ ''' security_context_constraint property '''
+ return self._scc
+
+ @security_context_constraint.setter
+ def security_context_constraint(self, scc):
+ ''' setter for security_context_constraint property '''
+ self._scc = scc
+
+ def get(self):
+ '''fetch the desired kind
+
+ This is only used for scc objects.
+ The {cluster}rolebinding lookups happen in exists().
+ '''
+ resource_name = self.config.config_options['name']['value']
+ if resource_name == 'cluster-reader':
+ resource_name += 's'
+
+ return self._get(self.config.kind, resource_name)
+
+ def exists_role_binding(self):
+ ''' return whether role_binding exists '''
+ bindings = None
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ bindings = self.clusterpolicybindings
+ else:
+ bindings = self.policybindings
+
+ if bindings is None:
+ return False
+
+ for binding in bindings['roleBindings']:
+ _rb = binding['roleBinding']
+ if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ _rb['userNames'] is not None and \
+ self.config.config_options['user']['value'] in _rb['userNames']:
+ self.role_binding = binding
+ return True
+
+ return False
+
+ def exists_scc(self):
+ ''' return whether scc exists '''
+ results = self.get()
+ if results['returncode'] == 0:
+ self.security_context_constraint = SecurityContextConstraints(results['results'][0])
+
+ if self.security_context_constraint.find_user(self.config.config_options['user']['value']) is not None:
+ return True
+
+ return False
+
+ return results
+
+ def exists(self):
+ '''does the object exist?'''
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ return self.exists_role_binding()
+
+ elif self.config.config_options['resource_kind']['value'] == 'role':
+ return self.exists_role_binding()
+
+ elif self.config.config_options['resource_kind']['value'] == 'scc':
+ return self.exists_scc()
+
+ return False
+
+ def perform(self):
+ '''perform action on resource'''
+ cmd = ['policy',
+ self.config.config_options['action']['value'],
+ self.config.config_options['name']['value'],
+ self.config.config_options['user']['value']]
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ state = params['state']
+
+ action = None
+ if state == 'present':
+ action = 'add-' + params['resource_kind'] + '-to-user'
+ else:
+ action = 'remove-' + params['resource_kind'] + '-from-user'
+
+ nconfig = PolicyUserConfig(params['namespace'],
+ params['kubeconfig'],
+ {'action': {'value': action, 'include': False},
+ 'user': {'value': params['user'], 'include': False},
+ 'resource_kind': {'value': params['resource_kind'], 'include': False},
+ 'name': {'value': params['resource_name'], 'include': False},
+ })
+
+ policyuser = PolicyUser(nconfig, params['debug'])
+
+ # Run the oc adm policy user related command
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not policyuser.exists():
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: would have performed a delete.'}
+
+ api_rval = policyuser.perform()
+
+ if api_rval['returncode'] != 0:
+ return {'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, state: 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ results = policyuser.exists()
+ if isinstance(results, dict) and 'returncode' in results and results['returncode'] != 0:
+ return {'msg': results}
+
+ if not results:
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: would have performed a create.'}
+
+ api_rval = policyuser.perform()
+
+ if api_rval['returncode'] != 0:
+ return {'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, state: 'present'}
+
+ return {'changed': False, state: 'present'}
+
+ return {'failed': True, 'changed': False, 'results': 'Unknown state passed. %s' % state, state: 'unknown'}
+
+# -*- -*- -*- End included fragment: class/oc_adm_policy_user.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_adm_policy_user.py -*- -*- -*-
+
+
+def main():
+ '''
+ ansible oc adm module for user policy
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent']),
+ debug=dict(default=False, type='bool'),
+ resource_name=dict(required=True, type='str'),
+ namespace=dict(default='default', type='str'),
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+
+ user=dict(required=True, type='str'),
+ resource_kind=dict(required=True, choices=['role', 'cluster-role', 'scc'], type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = PolicyUser.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_adm_policy_user.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index fa17d0e58..93cf34559 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -371,7 +371,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -380,7 +381,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -394,6 +395,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1119,13 +1126,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1147,9 +1154,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1387,8 +1394,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1408,8 +1415,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1455,10 +1462,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
@@ -1571,6 +1579,18 @@ spec:
return False
+ def get_env_var(self, key):
+ '''return a single environment variable by name'''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
def get_env_vars(self):
'''return the list of environment variables '''
return self.get(DeploymentConfig.env_path) or []
@@ -1973,6 +1993,7 @@ class Service(Yedit):
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
+ selector_path = 'spec.selector'
kind = 'Service'
def __init__(self, content):
@@ -1983,6 +2004,10 @@ class Service(Yedit):
''' get a list of ports '''
return self.get(Service.port_path) or []
+ def get_selector(self):
+ ''' get the service selector'''
+ return self.get(Service.selector_path) or {}
+
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
@@ -2051,20 +2076,21 @@ class Volume(object):
''' return a properly structured volume '''
volume_mount = None
volume = {'name': volume_info['name']}
- if volume_info['type'] == 'secret':
+ volume_type = volume_info['type'].lower()
+ if volume_type == 'secret':
volume['secret'] = {}
volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
- elif volume_info['type'] == 'emptydir':
+ elif volume_type == 'emptydir':
volume['emptyDir'] = {}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
- elif volume_info['type'] == 'pvc':
+ elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
volume['persistentVolumeClaim'] = {}
volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
- elif volume_info['type'] == 'hostpath':
+ elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
@@ -2209,8 +2235,8 @@ class Registry(OpenShiftCLI):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
- if not results:
- raise RegistryException('Could not perform registry preparation.')
+ if not results or ('returncode' in results and results['returncode'] != 0):
+ raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@@ -2231,7 +2257,7 @@ class Registry(OpenShiftCLI):
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
- self.service = Yedit(content=result['results'][0])
+ self.service = Service(result['results'][0])
if result['returncode'] != 0:
rval = result['returncode']
@@ -2241,8 +2267,7 @@ class Registry(OpenShiftCLI):
def exists(self):
'''does the object exist?'''
- self.get()
- if self.deploymentconfig or self.service:
+ if self.deploymentconfig and self.service:
return True
return False
@@ -2268,7 +2293,7 @@ class Registry(OpenShiftCLI):
''' prepare a registry for instantiation '''
options = self.config.to_option_list()
- cmd = ['registry', '-n', self.config.namespace]
+ cmd = ['registry']
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
@@ -2276,8 +2301,8 @@ class Registry(OpenShiftCLI):
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
- if results['returncode'] != 0 and 'items' in results['results']:
- return results
+ if results['returncode'] != 0 and 'items' not in results['results']:
+ raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
@@ -2301,6 +2326,10 @@ class Registry(OpenShiftCLI):
if self.portal_ip:
service.put('spec.portalIP', self.portal_ip)
+ # the dry-run doesn't apply the selector correctly
+ if self.service:
+ service.put('spec.selector', self.service.get_selector())
+
# need to create the service and the deploymentconfig
service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
@@ -2315,8 +2344,20 @@ class Registry(OpenShiftCLI):
def create(self):
'''Create a registry'''
results = []
- for config_file in ['deployment_file', 'service_file']:
- results.append(self._create(self.prepared_registry[config_file]))
+ self.needs_update()
+ # if the object is none, then we need to create it
+ # if the object needs an update, then we should call replace
+ # Handle the deploymentconfig
+ if self.deploymentconfig is None:
+ results.append(self._create(self.prepared_registry['deployment_file']))
+ elif self.prepared_registry['deployment_update']:
+ results.append(self._replace(self.prepared_registry['deployment_file']))
+
+ # Handle the service
+ if self.service is None:
+ results.append(self._create(self.prepared_registry['service_file']))
+ elif self.prepared_registry['service_update']:
+ results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
@@ -2328,7 +2369,7 @@ class Registry(OpenShiftCLI):
return {'returncode': rval, 'results': results}
def update(self):
- '''run update for the registry. This performs a delete and then create '''
+ '''run update for the registry. This performs a replace if required'''
# Store the current service IP
if self.service:
svcip = self.service.get('spec.clusterIP')
@@ -2354,6 +2395,14 @@ class Registry(OpenShiftCLI):
def add_modifications(self, deploymentconfig):
''' update a deployment config with changes '''
+ # The environment variable for REGISTRY_HTTP_SECRET is autogenerated
+ # We should set the generated deploymentconfig to the in memory version
+ # the following modifications will overwrite if needed
+ if self.deploymentconfig:
+ result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
+ if result:
+ deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
+
# Currently we know that our deployment of a registry requires a few extra modifications
# Modification 1
# we need specific environment variables to be set
@@ -2394,14 +2443,12 @@ class Registry(OpenShiftCLI):
def needs_update(self):
''' check to see if we need to update '''
- if not self.service or not self.deploymentconfig:
- return True
-
exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
- if not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
- self.service.yaml_dict,
- exclude_list,
- debug=self.verbose):
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
+ self.service.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
self.prepared_registry['service_update'] = True
exclude_list = ['dnsPolicy',
@@ -2417,10 +2464,11 @@ class Registry(OpenShiftCLI):
'activeDeadlineSeconds', # added in 1.5 for timeouts
]
- if not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
- self.deploymentconfig.yaml_dict,
- exclude_list,
- debug=self.verbose):
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
self.prepared_registry['deployment_update'] = True
return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
@@ -2547,8 +2595,8 @@ def main():
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
volume_mounts=dict(default=None, type='list'),
- env_vars=dict(default=None, type='dict'),
- edits=dict(default=None, type='list'),
+ env_vars=dict(default={}, type='dict'),
+ edits=dict(default=[], type='list'),
enforce_quota=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
daemonset=dict(default=False, type='bool'),
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index a9e76a92e..e666e0d09 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -396,7 +396,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -405,7 +406,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -419,6 +420,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1144,13 +1151,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1172,9 +1179,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1412,8 +1419,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1433,8 +1440,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1480,10 +1487,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
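+        # iterate in sorted key order so the generated CLI flags are deterministic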
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
@@ -1558,6 +1566,7 @@ class Service(Yedit):
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
+ selector_path = 'spec.selector'
kind = 'Service'
def __init__(self, content):
@@ -1568,6 +1577,10 @@ class Service(Yedit):
''' get a list of ports '''
return self.get(Service.port_path) or []
+ def get_selector(self):
+ ''' get the service selector'''
+ return self.get(Service.selector_path) or {}
+
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
@@ -1724,6 +1737,18 @@ spec:
return False
+ def get_env_var(self, key):
+        '''return a single environment variable by name'''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
def get_env_vars(self):
'''return the environment variables'''
return self.get(DeploymentConfig.env_path) or []
@@ -2594,6 +2619,21 @@ class Router(OpenShiftCLI):
''' setter for property rolebinding '''
self._rolebinding = config
+ def get_object_by_kind(self, kind):
+        '''return the current object for the given kind name'''
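+        # accepts short or long names, e.g. 'dc' and 'DeploymentConfig' both
+        # resolve to self.deploymentconfig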
+ if re.match("^(dc|deploymentconfig)$", kind, flags=re.IGNORECASE):
+ return self.deploymentconfig
+ elif re.match("^(svc|service)$", kind, flags=re.IGNORECASE):
+ return self.service
+ elif re.match("^(sa|serviceaccount)$", kind, flags=re.IGNORECASE):
+ return self.serviceaccount
+ elif re.match("secret", kind, flags=re.IGNORECASE):
+ return self.secret
+ elif re.match("clusterrolebinding", kind, flags=re.IGNORECASE):
+ return self.rolebinding
+
+ return None
+
def get(self):
''' return the self.router_parts '''
self.service = None
@@ -2690,7 +2730,7 @@ class Router(OpenShiftCLI):
options = self.config.to_option_list()
- cmd = ['router', self.config.name, '-n', self.config.namespace]
+ cmd = ['router', self.config.name]
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
@@ -2744,13 +2784,19 @@ class Router(OpenShiftCLI):
- clusterrolebinding
'''
results = []
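+        # refresh the per-kind 'update' flags so each part below is created or replaced as needed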
+ self.needs_update()
import time
# pylint: disable=maybe-no-member
- for _, oc_data in self.prepared_router.items():
+ for kind, oc_data in self.prepared_router.items():
if oc_data['obj'] is not None:
time.sleep(1)
- results.append(self._create(oc_data['path']))
+ if self.get_object_by_kind(kind) is None:
+ results.append(self._create(oc_data['path']))
+
+ elif oc_data['update']:
+ results.append(self._replace(oc_data['path']))
+
rval = 0
for result in results:
@@ -2778,17 +2824,15 @@ class Router(OpenShiftCLI):
# pylint: disable=too-many-return-statements,too-many-branches
def needs_update(self):
''' check to see if we need to update '''
- if not self.deploymentconfig or not self.service or not self.serviceaccount or not self.secret:
- return True
-
# ServiceAccount:
# Need to determine changes from the pregenerated ones from the original
# Since these are auto generated, we can skip
skip = ['secrets', 'imagePullSecrets']
- if not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
- self.serviceaccount.yaml_dict,
- skip_keys=skip,
- debug=self.verbose):
+ if self.serviceaccount is None or \
+ not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
+ self.serviceaccount.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
self.prepared_router['ServiceAccount']['update'] = True
# Secret:
@@ -2797,10 +2841,11 @@ class Router(OpenShiftCLI):
if not self.secret:
self.prepared_router['Secret']['update'] = True
- if not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
- self.secret.yaml_dict,
- skip_keys=skip,
- debug=self.verbose):
+ if self.secret is None or \
+ not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
+ self.secret.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
self.prepared_router['Secret']['update'] = True
# Service:
@@ -2809,28 +2854,30 @@ class Router(OpenShiftCLI):
port['protocol'] = 'TCP'
skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type']
- if not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
- self.service.yaml_dict,
- skip_keys=skip,
- debug=self.verbose):
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
+ self.service.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
self.prepared_router['Service']['update'] = True
# DeploymentConfig:
# Router needs some exceptions.
# We do not want to check the autogenerated password for stats admin
- if not self.config.config_options['stats_password']['value']:
- for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
- 'spec.template.spec.containers[0].env') or []):
- if env_var['name'] == 'STATS_PASSWORD':
- env_var['value'] = \
- self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
- break
+ if self.deploymentconfig is not None:
+ if not self.config.config_options['stats_password']['value']:
+ for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].env') or []):
+ if env_var['name'] == 'STATS_PASSWORD':
+ env_var['value'] = \
+ self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
+ break
- # dry-run doesn't add the protocol to the ports section. We will manually do that.
- for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
- 'spec.template.spec.containers[0].ports') or []):
- if not 'protocol' in port:
- port['protocol'] = 'TCP'
+ # dry-run doesn't add the protocol to the ports section. We will manually do that.
+ for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].ports') or []):
+ if not 'protocol' in port:
+ port['protocol'] = 'TCP'
# These are different when generating
skip = ['dnsPolicy',
@@ -2841,10 +2888,11 @@ class Router(OpenShiftCLI):
'defaultMode',
]
- if not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
- self.deploymentconfig.yaml_dict,
- skip_keys=skip,
- debug=self.verbose):
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
self.prepared_router['DeploymentConfig']['update'] = True
# Check if any of the parts need updating, if so, return True
diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py
new file mode 100644
index 000000000..d2620b4cc
--- /dev/null
+++ b/roles/lib_openshift/library/oc_atomic_container.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: doc/atomic_container -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_atomic_container
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform
+ - Allows executing commands on the container images
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ name:
+ description:
+ - Name of the container
+ required: True
+ default: null
+ image:
+ description:
+ - The image to use to install the container
+ required: True
+ default: null
+ state:
+ description:
+ - State of the container
+    required: False
+    choices: ["present", "absent", "latest", "rollback"]
+ default: "latest"
+ values:
+ description:
+    - Values passed to 'atomic install' as --set NAME=VALUE arguments
+    required: False
+    default: null
+'''
+
+# -*- -*- -*- End included fragment: doc/atomic_container -*- -*- -*-
+
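+# A minimal usage sketch added for illustration; the image and --set value
+# below are hypothetical placeholders, not part of the original module.
+EXAMPLES = '''
+- name: keep the node system container installed and at the latest image
+  oc_atomic_container:
+    name: node
+    image: 'registry.example.com/openshift3/node:v3.5'
+    state: latest
+    values:
+    - 'DOCKER_SERVICE=docker.service'
+'''
+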
+# -*- -*- -*- Begin included fragment: ansible/oc_atomic_container.py -*- -*- -*-
+
+# pylint: disable=wrong-import-position,too-many-branches,invalid-name
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _install(module, container, image, values_list):
+ ''' install a container using atomic CLI. values_list is the list of --set arguments.
+ container is the name given to the container. image is the image to use for the installation. '''
+ args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ return rc, out, err, False
+ else:
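+        # 'atomic' prints an "Extracting ..." line only when it unpacks a new
+        # image, so treat that as the signal that something changed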
+ changed = "Extracting" in out
+ return rc, out, err, changed
+
+def _uninstall(module, name):
+ ''' uninstall an atomic container by its name. '''
+ args = ['atomic', 'uninstall', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ return rc, out, err, False
+
+
+def do_install(module, container, image, values_list):
+ ''' install a container and exit the module. '''
+ rc, out, err, changed = _install(module, container, image, values_list)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name):
+ ''' uninstall a container and exit the module. '''
+ rc, out, err, changed = _uninstall(module, name)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_update(module, container, old_image, image, values_list):
+    ''' update a container and exit the module. If the container uses a different
+    image than the currently installed one, uninstall the old one first. '''
+
+ # the image we want is different than the installed one
+ if old_image != image:
+ rc, out, err, _ = _uninstall(module, container)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return do_install(module, container, image, values_list)
+
+ # if the image didn't change, use "atomic containers update"
+ args = ['atomic', 'containers', 'update'] + values_list + [container]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_rollback(module, name):
+ ''' move to the previous deployment of the container, if present, and exit the module. '''
+ args = ['atomic', 'containers', 'rollback', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Rolling back" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+ ''' entrypoint for the module. '''
+ name = module.params['name']
+ image = module.params['image']
+ values = module.params['values']
+ state = module.params['state']
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ values_list = ["--set=%s" % x for x in values] if values else []
+
+ args = ['atomic', 'containers', 'list', '--json', '--all', '-f', 'container=%s' % name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return
+
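+    # 'out' is the JSON listing from 'atomic containers list' filtered by name;
+    # an empty list means no container with this name is installed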
+ containers = json.loads(out)
+ present = len(containers) > 0
+ old_image = containers[0]["image_name"] if present else None
+
+ if state == 'present' and present:
+ module.exit_json(msg=out, changed=False)
+ elif (state in ['latest', 'present']) and not present:
+ do_install(module, name, image, values_list)
+ elif state == 'latest':
+ do_update(module, name, old_image, image, values_list)
+ elif state == 'absent':
+ if not present:
+ module.exit_json(msg="", changed=False)
+ else:
+ do_uninstall(module, name)
+ elif state == 'rollback':
+ do_rollback(module, name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, required=True),
+ image=dict(default=None, required=True),
+ state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+ values=dict(type='list', default=[]),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ rc, _, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e: # pylint: disable=broad-except
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_atomic_container.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 8901042ac..42f50ebe7 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -295,7 +295,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -304,7 +305,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -318,6 +319,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1043,13 +1050,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1071,9 +1078,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1311,8 +1318,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1332,8 +1339,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1379,10 +1386,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 3286985c5..3088ea947 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -262,7 +262,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -271,7 +272,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -285,6 +286,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1010,13 +1017,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1038,9 +1045,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1278,8 +1285,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1299,8 +1306,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1346,10 +1353,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
@@ -1462,6 +1470,18 @@ spec:
return False
+ def get_env_var(self, key):
+        '''return a single environment variable by name'''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
def get_env_vars(self):
'''return the environment variables'''
return self.get(DeploymentConfig.env_path) or []
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 7a4d6959a..cfcb15241 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -271,7 +271,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -280,7 +281,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -294,6 +295,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1019,13 +1026,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1047,9 +1054,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1287,8 +1294,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1308,8 +1315,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1355,10 +1362,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 0f56ce983..f5cba696d 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -274,7 +274,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -283,7 +284,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -297,6 +298,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1022,13 +1029,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1050,9 +1057,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1290,8 +1297,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1311,8 +1318,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1358,10 +1365,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
new file mode 100644
index 000000000..4e1e769cf
--- /dev/null
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -0,0 +1,1423 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/objectvalidator -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_objectvalidator
+short_description: Validate OpenShift objects
+description:
+ - Validate OpenShift objects
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+author:
+- "Mo Khan <monis@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Run the oc_objectvalidator module and register the result:
+- name: run oc_objectvalidator
+ oc_objectvalidator:
+ register: oc_objectvalidator
+'''
+
+# -*- -*- -*- End included fragment: doc/objectvalidator -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, inc_sep):
+        ''' setter method for separator '''
+        self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
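+        # e.g. parse_key('a.b[0].c') -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]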
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Set an item in a dictionary using key notation a.b.c
+            d = {'a': {}}
+            add_entry(d, 'a.b', 'c') => d == {'a': {'b': 'c'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+        # copy.deepcopy does not preserve ruamel.yaml format attributes, so
+        # round-trip through ruamel.yaml when available, else fall back to deepcopy
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+            # copy.deepcopy does not preserve ruamel.yaml format attributes, so
+            # round-trip through ruamel.yaml when available, else fall back to deepcopy
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
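+        # e.g. parse_value('yes', vtype='bool') passes the boolean check below
+        # and yaml.load later coerces the string to True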
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+                               'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+    return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+        '''Turn an array of dicts with 'path' and 'data' keys into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the parsed contents of a resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get the openshift version in OpenShift 3.2
+ # By default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
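+ # For example, given typical "oc version" output:
+ #
+ #   oc v3.4.0.39
+ #   kubernetes v1.4.0+776c994
+ #
+ # filter_versions returns {'oc': 'v3.4.0.39', 'kubernetes': 'v1.4.0+776c994',
+ # 'openshift': 'v3.4.0.39'} (the 'openshift' key is backfilled from 'oc').
+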
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
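+ # For example:
+ #
+ #   Utils.add_custom_versions({'oc': 'v3.3.0.33-1+abc'})
+ #   # -> {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}
+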
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a list of cli params'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as a list of cli params '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
+
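+ # A small sketch of how stringify renders options (hypothetical input):
+ #
+ #   OpenShiftCLIConfig('r', 'ns', '/tmp/kc',
+ #                      {'display_name': {'value': 'ops', 'include': True},
+ #                       'description': {'value': None, 'include': True}}).stringify()
+ #   # -> ['--display-name=ops']  (None values are skipped)
+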
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_objectvalidator.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class OCObjectValidator(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ def __init__(self, kubeconfig):
+ ''' Constructor for OCObjectValidator '''
+ # namespace has no meaning for object validation, hardcode to 'default'
+ super(OCObjectValidator, self).__init__('default', kubeconfig)
+
+ def get_invalid(self, kind, invalid_filter):
+ ''' return invalid object information '''
+
+ rval = self._get(kind)
+ if rval['returncode'] != 0:
+ return False, rval, []
+
+ return True, rval, list(filter(invalid_filter, rval['results'][0]['items'])) # wrap filter with list for py3
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion of this module
+ '''
+
+ objectvalidator = OCObjectValidator(params['kubeconfig'])
+ all_invalid = {}
+ failed = False
+
+ def _is_invalid_namespace(namespace):
+ # check if it uses a reserved name
+ name = namespace['metadata']['name']
+ if not any((name == 'kube',
+ name == 'openshift',
+ name.startswith('kube-'),
+ name.startswith('openshift-'),)):
+ return False
+
+ # determine if the namespace was created by a user
+ if 'annotations' not in namespace['metadata']:
+ return False
+ return 'openshift.io/requester' in namespace['metadata']['annotations']
+
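+ # For example (hypothetical object), a user-requested namespace with a
+ # reserved prefix is flagged:
+ #
+ #   _is_invalid_namespace({'metadata': {
+ #       'name': 'openshift-foo',
+ #       'annotations': {'openshift.io/requester': 'alice'}}})
+ #   # -> True
+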
+ checks = (
+ (
+ 'hostsubnet',
+ lambda x: x['metadata']['name'] != x['host'],
+ u'hostsubnets where metadata.name != host',
+ ),
+ (
+ 'netnamespace',
+ lambda x: x['metadata']['name'] != x['netname'],
+ u'netnamespaces where metadata.name != netname',
+ ),
+ (
+ 'namespace',
+ _is_invalid_namespace,
+ u'namespaces that use reserved names and were not created by infrastructure components',
+ ),
+ )
+
+ for resource, invalid_filter, invalid_msg in checks:
+ success, rval, invalid = objectvalidator.get_invalid(resource, invalid_filter)
+ if not success:
+ return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval}
+ if invalid:
+ failed = True
+ all_invalid[invalid_msg] = invalid
+
+ if failed:
+ return {
+ 'failed': True,
+ 'msg': (
+ "All objects are not valid. If you are a supported customer please contact "
+ "Red Hat Support providing the complete output above. If you are not a customer "
+ "please contact users@lists.openshift.redhat.com for assistance."
+ ),
+ 'state': 'list',
+ 'results': all_invalid
+ }
+
+ return {'msg': 'All objects are valid.'}
+
+# -*- -*- -*- End included fragment: class/oc_objectvalidator.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_objectvalidator.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for validating OpenShift objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ ),
+ supports_check_mode=False,
+ )
+
+
+ rval = OCObjectValidator.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_objectvalidator.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 4f53bb5d6..cabb2ff29 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -263,7 +263,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -272,7 +273,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -286,6 +287,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1011,13 +1018,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1039,9 +1046,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1279,8 +1286,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1300,8 +1307,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1347,10 +1354,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
new file mode 100644
index 000000000..0d0094c45
--- /dev/null
+++ b/roles/lib_openshift/library/oc_project.py
@@ -0,0 +1,1676 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/project -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_project
+short_description: Module to manage openshift projects
+description:
+ - Manage openshift projects programmatically.
+options:
+ state:
+ description:
+ - If present, the project will be created if it doesn't exist, or updated if different. If absent, the project will be removed if present. If list, information about the project will be gathered and returned as part of the Ansible call results.
+ required: false
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the project.
+ required: true
+ aliases: []
+ display_name:
+ description:
+ - The display name attribute for a project
+ required: false
+ default: None
+ aliases: []
+ description:
+ description:
+ - The description attribute for a project
+ required: false
+ default: None
+ aliases: []
+ admin:
+ description:
+ - The project admin username
+ required: false
+ default: None
+ aliases: []
+ admin_role:
+ description:
+ - The role to assign to the project admin
+ required: false
+ default: 'admin'
+ aliases: []
+ node_selector:
+ description:
+ - The node selector for this project.
+ - This allows certain pods in this project to run on certain nodes.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+ - name: create project
+ oc_project:
+ state: present
+ name: openshift-ops
+ display_name: operations team project
+ node_selector:
+ - top=secret
+ - noncustomer=True
+'''
+
+# -*- -*- -*- End included fragment: doc/project -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, value):
+ ''' setter method for separator '''
+ self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
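+ # For example, with the default separator:
+ #
+ #   Yedit.parse_key('a.b[0].c')
+ #   # -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]
+ # each tuple is (array_index, dict_key); exactly one side is non-empty.
+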
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {}}
+ key = a#b, item = c
+ result: d = {'a': {'b': 'c'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
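+ # A sketch of add_entry in action:
+ #
+ #   d = {'a': {}}
+ #   Yedit.add_entry(d, 'a.b.c', 'value')
+ #   # d is now {'a': {'b': {'c': 'value'}}}
+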
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
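+ # For example:
+ #
+ #   Yedit.get_entry({'a': {'b': ['x', 'y']}}, 'a.b[1]')
+ #   # -> 'y'
+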
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
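+ # For example:
+ #
+ #   Yedit.parse_value('true', 'bool')   # -> True
+ #   Yedit.parse_value('42')             # -> 42 (yaml-loaded)
+ #   Yedit.parse_value('42', 'str')      # -> '42' (left as a string)
+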
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': ('Error opening file [%s]. Verify that the '
+ 'file exists, that it has correct '
+ 'permissions, and is valid yaml.') % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
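+ # _process composes a command such as (hypothetical template/params):
+ #
+ #   oc process mytemplate -v PARAM1=val1 PARAM2=val2 -n <namespace>
+ #
+ # and, when create=True, feeds the processed output back to 'oc create'.
+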
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned as an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn a list of dicts with 'path' and 'data' keys into a list of temp files'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the parsed contents of a resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get the openshift version in OpenShift 3.2
+ # By default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
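+ # A sketch of a typical comparison (hypothetical definitions):
+ #
+ #   user = {'spec': {'replicas': 2}}
+ #   live = {'spec': {'replicas': 2}, 'status': {'phase': 'Active'}}
+ #   Utils.check_def_equal(user, live)
+ #   # -> True ('status' and 'metadata' are skipped by default)
+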
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a list of cli params'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as a list of cli params '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/project.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class ProjectConfig(OpenShiftCLIConfig):
+ ''' project config object '''
+ def __init__(self, rname, namespace, kubeconfig, project_options):
+ super(ProjectConfig, self).__init__(rname, None, kubeconfig, project_options)
+
+
+class Project(Yedit):
+ ''' Class to model an openshift project object '''
+ annotations_path = "metadata.annotations"
+ kind = 'Project'
+ annotation_prefix = 'openshift.io/'
+
+ def __init__(self, content):
+ '''Project constructor'''
+ super(Project, self).__init__(content=content)
+
+ def get_annotations(self):
+ ''' return the annotations'''
+ return self.get(Project.annotations_path) or {}
+
+ def add_annotations(self, inc_annos):
+ ''' add annotations to the existing annotations'''
+ if not isinstance(inc_annos, list):
+ inc_annos = [inc_annos]
+
+ annos = self.get_annotations()
+ if not annos:
+ self.put(Project.annotations_path, inc_annos)
+ else:
+ for anno in inc_annos:
+ for key, value in anno.items():
+ annos[key] = value
+
+ return True
+
+ def find_annotation(self, key):
+ ''' find an annotation'''
+ annotations = self.get_annotations()
+ for anno in annotations:
+ if Project.annotation_prefix + key == anno:
+ return annotations[anno]
+
+ return None
+
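+ # For example (hypothetical content):
+ #
+ #   proj = Project({'metadata': {'annotations':
+ #                   {'openshift.io/display-name': 'Ops'}}})
+ #   proj.find_annotation('display-name')   # -> 'Ops'
+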
+ def delete_annotation(self, inc_anno_keys):
+ ''' remove an annotation from a project'''
+ if not isinstance(inc_anno_keys, list):
+ inc_anno_keys = [inc_anno_keys]
+
+ annos = self.get(Project.annotations_path) or {}
+
+ if not annos:
+ return True
+
+ removed = False
+ for inc_anno in inc_anno_keys:
+ anno = self.find_annotation(inc_anno)
+ if anno:
+ del annos[Project.annotation_prefix + inc_anno]
+ removed = True
+
+ return removed
+
+ def update_annotation(self, key, value):
+ ''' update an annotation for a project'''
+ annos = self.get(Project.annotations_path) or {}
+
+ if not annos:
+ return True
+
+ updated = False
+ anno = self.find_annotation(key)
+ if anno:
+ annos[Project.annotation_prefix + key] = value
+ updated = True
+
+ else:
+ self.add_annotations({Project.annotation_prefix + key: value})
+ updated = True
+
+ return updated
+
+# -*- -*- -*- End included fragment: lib/project.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_project.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class OCProject(OpenShiftCLI):
+ ''' Project Class to manage project/namespace objects'''
+ kind = 'namespace'
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCProject '''
+ super(OCProject, self).__init__(None, config.kubeconfig)
+ self.config = config
+ self._project = None
+
+ @property
+ def project(self):
+ ''' property for project'''
+ if not self._project:
+ self.get()
+ return self._project
+
+ @project.setter
+ def project(self, data):
+ ''' setter function for project property'''
+ self._project = data
+
+ def exists(self):
+ ''' return whether a project exists '''
+ if self.project:
+ return True
+
+ return False
+
+ def get(self):
+ '''return project '''
+ result = self._get(self.kind, self.config.name)
+
+ if result['returncode'] == 0:
+ self.project = Project(content=result['results'][0])
+ result['results'] = self.project.yaml_dict
+
+ elif 'namespaces "%s" not found' % self.config.name in result['stderr']:
+ result = {'results': [], 'returncode': 0}
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create a project '''
+ cmd = ['new-project', self.config.name]
+ cmd.extend(self.config.to_option_list())
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ def update(self):
+ '''update a project '''
+
+ if self.config.config_options['display_name']['value'] is not None:
+ self.project.update_annotation('display-name', self.config.config_options['display_name']['value'])
+
+ if self.config.config_options['description']['value'] is not None:
+ self.project.update_annotation('description', self.config.config_options['description']['value'])
+
+ # work around for immutable project field
+ if self.config.config_options['node_selector']['value'] is not None:
+ self.project.update_annotation('node-selector', self.config.config_options['node_selector']['value'])
+
+ return self._replace_content(self.kind, self.config.name, self.project.yaml_dict)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ if self.config.config_options['display_name']['value'] is not None:
+ result = self.project.find_annotation("display-name")
+ if result != self.config.config_options['display_name']['value']:
+ return True
+
+ if self.config.config_options['description']['value'] is not None:
+ result = self.project.find_annotation("description")
+ if result != self.config.config_options['description']['value']:
+ return True
+
+ if self.config.config_options['node_selector']['value'] is not None:
+ result = self.project.find_annotation("node-selector")
+ if result != self.config.config_options['node_selector']['value']:
+ return True
+
+ return False
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ _ns = None
+ if params['node_selector'] is not None:
+ _ns = ','.join(params['node_selector'])
+
+ pconfig = ProjectConfig(params['name'],
+ 'None',
+ params['kubeconfig'],
+ {'admin': {'value': params['admin'], 'include': True},
+ 'admin_role': {'value': params['admin_role'], 'include': True},
+ 'description': {'value': params['description'], 'include': True},
+ 'display_name': {'value': params['display_name'], 'include': True},
+ 'node_selector': {'value': _ns, 'include': True},
+ })
+
+ oadm_project = OCProject(pconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oadm_project.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oadm_project.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oadm_project.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oadm_project.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oadm_project.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oadm_project.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oadm_project.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = oadm_project.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oadm_project.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. [%s]' % state}
+
+# -*- -*- -*- End included fragment: class/oc_project.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_project.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for project
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ display_name=dict(default=None, type='str'),
+ node_selector=dict(default=None, type='list'),
+ description=dict(default=None, type='str'),
+ admin=dict(default=None, type='str'),
+ admin_role=dict(default='admin', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCProject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_project.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index a2cbd9b93..fe59cca33 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -305,7 +305,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -314,7 +315,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -328,6 +329,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1053,13 +1060,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1081,9 +1088,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1321,8 +1328,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1342,8 +1349,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1389,10 +1396,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
@@ -1594,8 +1602,10 @@ class OCRoute(OpenShiftCLI):
def update(self):
'''update the object'''
- # need to update the tls information and the service name
- return self._replace_content(self.kind, self.config.name, self.config.data)
+ return self._replace_content(self.kind,
+ self.config.name,
+ self.config.data,
+ force=(self.config.host != self.route.get_host()))
def needs_update(self):
''' verify an update is needed '''
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 3523e7ea6..98f1d94a7 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -249,7 +249,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -258,7 +259,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -272,6 +273,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -997,13 +1004,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1025,9 +1032,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1265,8 +1272,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1286,8 +1293,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1333,10 +1340,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
@@ -1449,6 +1457,18 @@ spec:
return False
+ def get_env_var(self, key):
+ '''return a single environment variable by name '''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
def get_env_vars(self):
'''return the environment variables '''
return self.get(DeploymentConfig.env_path) or []
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index db9a3a7ec..deba4ab8a 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -295,7 +295,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -304,7 +305,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -318,6 +319,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1043,13 +1050,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1071,9 +1078,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1311,8 +1318,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1332,8 +1339,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1379,10 +1386,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index c8d4b3040..c2e91e39e 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -301,7 +301,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -310,7 +311,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -324,6 +325,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -1049,13 +1056,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1077,9 +1084,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1317,8 +1324,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1338,8 +1345,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1385,10 +1392,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
@@ -1463,6 +1471,7 @@ class Service(Yedit):
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
+ selector_path = 'spec.selector'
kind = 'Service'
def __init__(self, content):
@@ -1473,6 +1482,10 @@ class Service(Yedit):
''' get a list of ports '''
return self.get(Service.port_path) or []
+ def get_selector(self):
+ ''' get the service selector'''
+ return self.get(Service.selector_path) or {}
+
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
@@ -1546,7 +1559,7 @@ class OCService(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCVolume '''
- super(OCService, self).__init__(namespace, kubeconfig)
+ super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
cluster_ip, portal_ip, session_affinity, service_type)
@@ -1617,7 +1630,9 @@ class OCService(OpenShiftCLI):
params['portalip'],
params['ports'],
params['session_affinity'],
- params['service_type'])
+ params['service_type'],
+ params['kubeconfig'],
+ params['debug'])
state = params['state']
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 3e650b5f2..a1d8fff14 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -247,7 +247,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -256,7 +257,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -270,6 +271,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -995,13 +1002,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1023,9 +1030,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1263,8 +1270,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1284,8 +1291,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1331,10 +1338,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 749cf2d8e..470043cc6 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -247,7 +247,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -256,7 +257,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -270,6 +271,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -995,13 +1002,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -1023,9 +1030,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1263,8 +1270,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1284,8 +1291,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1331,10 +1338,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index e9b970967..378c2b2e5 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -219,7 +219,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -228,7 +229,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -242,6 +243,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
@@ -967,13 +974,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -995,9 +1002,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1235,8 +1242,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1256,8 +1263,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1303,10 +1310,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
diff --git a/roles/lib_openshift/meta/main.yml b/roles/lib_openshift/meta/main.yml
index 7c72daa63..d5e7ec6e5 100644
--- a/roles/lib_openshift/meta/main.yml
+++ b/roles/lib_openshift/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- { role: openshift_repos }
+dependencies: []
diff --git a/roles/lib_openshift/src/ansible/oadm_manage_node.py b/roles/lib_openshift/src/ansible/oc_adm_manage_node.py
index b870c1211..b870c1211 100644
--- a/roles/lib_openshift/src/ansible/oadm_manage_node.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_manage_node.py
diff --git a/roles/lib_openshift/src/ansible/oc_adm_policy_group.py b/roles/lib_openshift/src/ansible/oc_adm_policy_group.py
new file mode 100644
index 000000000..cf6691b03
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_adm_policy_group.py
@@ -0,0 +1,34 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oc adm module for group policy
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent']),
+ debug=dict(default=False, type='bool'),
+ resource_name=dict(required=True, type='str'),
+ namespace=dict(default='default', type='str'),
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+
+ group=dict(required=True, type='str'),
+ resource_kind=dict(required=True, choices=['role', 'cluster-role', 'scc'], type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = PolicyGroup.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_adm_policy_user.py b/roles/lib_openshift/src/ansible/oc_adm_policy_user.py
new file mode 100644
index 000000000..a22496866
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_adm_policy_user.py
@@ -0,0 +1,34 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oc adm module for user policy
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent']),
+ debug=dict(default=False, type='bool'),
+ resource_name=dict(required=True, type='str'),
+ namespace=dict(default='default', type='str'),
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+
+ user=dict(required=True, type='str'),
+ resource_kind=dict(required=True, choices=['role', 'cluster-role', 'scc'], type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = PolicyUser.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_adm_registry.py b/roles/lib_openshift/src/ansible/oc_adm_registry.py
index a49b84589..c85973c7d 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_registry.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_registry.py
@@ -24,8 +24,8 @@ def main():
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
volume_mounts=dict(default=None, type='list'),
- env_vars=dict(default=None, type='dict'),
- edits=dict(default=None, type='list'),
+ env_vars=dict(default={}, type='dict'),
+ edits=dict(default=[], type='list'),
enforce_quota=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
daemonset=dict(default=False, type='bool'),
diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py
new file mode 100644
index 000000000..20d75cb63
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py
@@ -0,0 +1,137 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=wrong-import-position,too-many-branches,invalid-name
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _install(module, container, image, values_list):
+ ''' install a container using atomic CLI. values_list is the list of --set arguments.
+ container is the name given to the container. image is the image to use for the installation. '''
+ args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ return rc, out, err, False
+ else:
+ changed = "Extracting" in out
+ return rc, out, err, changed
+
+def _uninstall(module, name):
+ ''' uninstall an atomic container by its name. '''
+ args = ['atomic', 'uninstall', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ return rc, out, err, False
+
+
+def do_install(module, container, image, values_list):
+ ''' install a container and exit the module. '''
+ rc, out, err, changed = _install(module, container, image, values_list)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name):
+ ''' uninstall a container and exit the module. '''
+ rc, out, err, changed = _uninstall(module, name)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_update(module, container, old_image, image, values_list):
+ ''' update a container and exit the module. If the container uses a different
+ image than the currently installed one, first uninstall the old one '''
+
+ # the image we want is different than the installed one
+ if old_image != image:
+ rc, out, err, _ = _uninstall(module, container)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return do_install(module, container, image, values_list)
+
+ # if the image didn't change, use "atomic containers update"
+ args = ['atomic', 'containers', 'update'] + values_list + [container]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_rollback(module, name):
+ ''' move to the previous deployment of the container, if present, and exit the module. '''
+ args = ['atomic', 'containers', 'rollback', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Rolling back" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+ ''' entrypoint for the module. '''
+ name = module.params['name']
+ image = module.params['image']
+ values = module.params['values']
+ state = module.params['state']
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ values_list = ["--set=%s" % x for x in values] if values else []
+
+ args = ['atomic', 'containers', 'list', '--json', '--all', '-f', 'container=%s' % name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return
+
+ containers = json.loads(out)
+ present = len(containers) > 0
+ old_image = containers[0]["image_name"] if present else None
+
+ if state == 'present' and present:
+ module.exit_json(msg=out, changed=False)
+ elif (state in ['latest', 'present']) and not present:
+ do_install(module, name, image, values_list)
+ elif state == 'latest':
+ do_update(module, name, old_image, image, values_list)
+ elif state == 'absent':
+ if not present:
+ module.exit_json(msg="", changed=False)
+ else:
+ do_uninstall(module, name)
+ elif state == 'rollback':
+ do_rollback(module, name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, required=True),
+ image=dict(default=None, required=True),
+ state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+ values=dict(type='list', default=[]),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ rc, _, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e: # pylint: disable=broad-except
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_objectvalidator.py b/roles/lib_openshift/src/ansible/oc_objectvalidator.py
new file mode 100644
index 000000000..658bb5ded
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_objectvalidator.py
@@ -0,0 +1,24 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for validating OpenShift objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ ),
+ supports_check_mode=False,
+ )
+
+
+ rval = OCObjectValidator.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_project.py b/roles/lib_openshift/src/ansible/oc_project.py
new file mode 100644
index 000000000..b035cd712
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_project.py
@@ -0,0 +1,33 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for project
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ display_name=dict(default=None, type='str'),
+ node_selector=dict(default=None, type='list'),
+ description=dict(default=None, type='str'),
+ admin=dict(default=None, type='str'),
+ admin_role=dict(default='admin', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCProject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
index 6ed1f2f35..18c69f2fa 100644
--- a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
@@ -122,6 +122,9 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
return {'changed': True, 'results': api_rval, 'state': state}
########
diff --git a/roles/lib_openshift/src/class/oadm_manage_node.py b/roles/lib_openshift/src/class/oc_adm_manage_node.py
index c07320477..c07320477 100644
--- a/roles/lib_openshift/src/class/oadm_manage_node.py
+++ b/roles/lib_openshift/src/class/oc_adm_manage_node.py
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_group.py b/roles/lib_openshift/src/class/oc_adm_policy_group.py
new file mode 100644
index 000000000..1e51913e0
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_adm_policy_group.py
@@ -0,0 +1,223 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class PolicyGroupException(Exception):
+ ''' PolicyGroup exception'''
+ pass
+
+
+class PolicyGroupConfig(OpenShiftCLIConfig):
+ ''' PolicyGroupConfig is a DTO for group related policy. '''
+ def __init__(self, namespace, kubeconfig, policy_options):
+ super(PolicyGroupConfig, self).__init__(policy_options['name']['value'],
+ namespace, kubeconfig, policy_options)
+ self.kind = self.get_kind()
+ self.namespace = namespace
+
+ def get_kind(self):
+ ''' return the kind we are working with '''
+ if self.config_options['resource_kind']['value'] == 'role':
+ return 'rolebinding'
+ elif self.config_options['resource_kind']['value'] == 'cluster-role':
+ return 'clusterrolebinding'
+ elif self.config_options['resource_kind']['value'] == 'scc':
+ return 'scc'
+
+ return None
+
+
+# pylint: disable=too-many-return-statements
+class PolicyGroup(OpenShiftCLI):
+ ''' Class to handle attaching policies to groups '''
+
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for PolicyGroup '''
+ super(PolicyGroup, self).__init__(config.namespace, config.kubeconfig, verbose)
+ self.config = config
+ self.verbose = verbose
+ self._rolebinding = None
+ self._scc = None
+ self._cluster_policy_bindings = None
+ self._policy_bindings = None
+
+ @property
+ def policybindings(self):
+ if self._policy_bindings is None:
+ results = self._get('policybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve policybindings')
+ self._policy_bindings = results['results'][0]['items'][0]
+
+ return self._policy_bindings
+
+ @property
+ def clusterpolicybindings(self):
+ if self._cluster_policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
+ self._cluster_policy_bindings = results['results'][0]['items'][0]
+
+ return self._cluster_policy_bindings
+
+ @property
+ def role_binding(self):
+ ''' role_binding getter '''
+ return self._rolebinding
+
+ @role_binding.setter
+ def role_binding(self, binding):
+ ''' role_binding setter '''
+ self._rolebinding = binding
+
+ @property
+ def security_context_constraint(self):
+ ''' security_context_constraint getter '''
+ return self._scc
+
+ @security_context_constraint.setter
+ def security_context_constraint(self, scc):
+ ''' security_context_constraint setter '''
+ self._scc = scc
+
+ def get(self):
+ '''fetch the desired kind'''
+ resource_name = self.config.config_options['name']['value']
+ if resource_name == 'cluster-reader':
+ resource_name += 's'
+
+ # oc adm policy add-... creates policy bindings with the name
+ # "[resource_name]-binding", however some bindings in the system
+ # simply use "[resource_name]". So try both.
+
+ results = self._get(self.config.kind, resource_name)
+ if results['returncode'] == 0:
+ return results
+
+ # Now try -binding naming convention
+ return self._get(self.config.kind, resource_name + "-binding")
+
+ def exists_role_binding(self):
+ ''' return whether role_binding exists '''
+ bindings = None
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ bindings = self.clusterpolicybindings
+ else:
+ bindings = self.policybindings
+
+ if bindings is None:
+ return False
+
+ for binding in bindings['roleBindings']:
+ _rb = binding['roleBinding']
+ if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ _rb['groupNames'] is not None and \
+ self.config.config_options['group']['value'] in _rb['groupNames']:
+ self.role_binding = binding
+ return True
+
+ return False
+
+ def exists_scc(self):
+ ''' return whether scc exists '''
+ results = self.get()
+ if results['returncode'] == 0:
+ self.security_context_constraint = SecurityContextConstraints(results['results'][0])
+
+ if self.security_context_constraint.find_group(self.config.config_options['group']['value']) is not None:
+ return True
+
+ return False
+
+ return results
+
+ def exists(self):
+ '''does the object exist?'''
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ return self.exists_role_binding()
+
+ elif self.config.config_options['resource_kind']['value'] == 'role':
+ return self.exists_role_binding()
+
+ elif self.config.config_options['resource_kind']['value'] == 'scc':
+ return self.exists_scc()
+
+ return False
+
+ def perform(self):
+ '''perform action on resource'''
+ cmd = ['policy',
+ self.config.config_options['action']['value'],
+ self.config.config_options['name']['value'],
+ self.config.config_options['group']['value']]
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ state = params['state']
+
+ action = None
+ if state == 'present':
+ action = 'add-' + params['resource_kind'] + '-to-group'
+ else:
+ action = 'remove-' + params['resource_kind'] + '-from-group'
+
+ nconfig = PolicyGroupConfig(params['namespace'],
+ params['kubeconfig'],
+ {'action': {'value': action, 'include': False},
+ 'group': {'value': params['group'], 'include': False},
+ 'resource_kind': {'value': params['resource_kind'], 'include': False},
+ 'name': {'value': params['resource_name'], 'include': False},
+ })
+
+ policygroup = PolicyGroup(nconfig, params['debug'])
+
+ # Run the oc adm policy group related command
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not policygroup.exists():
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: would have performed a delete.'}
+
+ api_rval = policygroup.perform()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ results = policygroup.exists()
+ if isinstance(results, dict) and 'returncode' in results and results['returncode'] != 0:
+ return {'failed': True, 'msg': results}
+
+ if not results:
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: would have performed a create.'}
+
+ api_rval = policygroup.perform()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ return {'changed': False, 'state': 'present'}
+
+ return {'failed': True, 'changed': False, 'results': 'Unknown state passed. %s' % state, 'state': 'unknown'}
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py
new file mode 100644
index 000000000..88fcc1ddc
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py
@@ -0,0 +1,217 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class PolicyUserException(Exception):
+ ''' PolicyUser exception'''
+ pass
+
+
+class PolicyUserConfig(OpenShiftCLIConfig):
+ ''' PolicyUserConfig is a DTO for user related policy. '''
+ def __init__(self, namespace, kubeconfig, policy_options):
+ super(PolicyUserConfig, self).__init__(policy_options['name']['value'],
+ namespace, kubeconfig, policy_options)
+ self.kind = self.get_kind()
+ self.namespace = namespace
+
+ def get_kind(self):
+ ''' return the kind we are working with '''
+ if self.config_options['resource_kind']['value'] == 'role':
+ return 'rolebinding'
+ elif self.config_options['resource_kind']['value'] == 'cluster-role':
+ return 'clusterrolebinding'
+ elif self.config_options['resource_kind']['value'] == 'scc':
+ return 'scc'
+
+ return None
+
+
+# pylint: disable=too-many-return-statements
+class PolicyUser(OpenShiftCLI):
+ ''' Class to handle attaching policies to users '''
+
+ def __init__(self,
+ policy_config,
+ verbose=False):
+ ''' Constructor for PolicyUser '''
+ super(PolicyUser, self).__init__(policy_config.namespace, policy_config.kubeconfig, verbose)
+ self.config = policy_config
+ self.verbose = verbose
+ self._rolebinding = None
+ self._scc = None
+ self._cluster_policy_bindings = None
+ self._policy_bindings = None
+
+ @property
+ def policybindings(self):
+ if self._policy_bindings is None:
+ results = self._get('policybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve policybindings')
+ self._policy_bindings = results['results'][0]['items'][0]
+
+ return self._policy_bindings
+
+ @property
+ def clusterpolicybindings(self):
+ if self._cluster_policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
+ self._cluster_policy_bindings = results['results'][0]['items'][0]
+
+ return self._cluster_policy_bindings
+
+ @property
+ def role_binding(self):
+ ''' role_binding property '''
+ return self._rolebinding
+
+ @role_binding.setter
+ def role_binding(self, binding):
+ ''' setter for role_binding property '''
+ self._rolebinding = binding
+
+ @property
+ def security_context_constraint(self):
+ ''' security_context_constraint property '''
+ return self._scc
+
+ @security_context_constraint.setter
+ def security_context_constraint(self, scc):
+ ''' setter for security_context_constraint property '''
+ self._scc = scc
+
+ def get(self):
+ '''fetch the desired kind
+
+ This is only used for scc objects.
+ The {cluster}rolebindings happen in exists.
+ '''
+ resource_name = self.config.config_options['name']['value']
+ if resource_name == 'cluster-reader':
+ resource_name += 's'
+
+ return self._get(self.config.kind, resource_name)
+
+ def exists_role_binding(self):
+ ''' return whether role_binding exists '''
+ bindings = None
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ bindings = self.clusterpolicybindings
+ else:
+ bindings = self.policybindings
+
+ if bindings is None:
+ return False
+
+ for binding in bindings['roleBindings']:
+ _rb = binding['roleBinding']
+ if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ _rb['userNames'] is not None and \
+ self.config.config_options['user']['value'] in _rb['userNames']:
+ self.role_binding = binding
+ return True
+
+ return False
+
+ def exists_scc(self):
+ ''' return whether scc exists '''
+ results = self.get()
+ if results['returncode'] == 0:
+ self.security_context_constraint = SecurityContextConstraints(results['results'][0])
+
+ if self.security_context_constraint.find_user(self.config.config_options['user']['value']) is not None:
+ return True
+
+ return False
+
+ return results
+
+ def exists(self):
+ '''does the object exist?'''
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ return self.exists_role_binding()
+
+ elif self.config.config_options['resource_kind']['value'] == 'role':
+ return self.exists_role_binding()
+
+ elif self.config.config_options['resource_kind']['value'] == 'scc':
+ return self.exists_scc()
+
+ return False
+
+ def perform(self):
+ '''perform action on resource'''
+ cmd = ['policy',
+ self.config.config_options['action']['value'],
+ self.config.config_options['name']['value'],
+ self.config.config_options['user']['value']]
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ state = params['state']
+
+ action = None
+ if state == 'present':
+ action = 'add-' + params['resource_kind'] + '-to-user'
+ else:
+ action = 'remove-' + params['resource_kind'] + '-from-user'
+
+ nconfig = PolicyUserConfig(params['namespace'],
+ params['kubeconfig'],
+ {'action': {'value': action, 'include': False},
+ 'user': {'value': params['user'], 'include': False},
+ 'resource_kind': {'value': params['resource_kind'], 'include': False},
+ 'name': {'value': params['resource_name'], 'include': False},
+ })
+
+ policyuser = PolicyUser(nconfig, params['debug'])
+
+ # Run the oc adm policy user related command
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not policyuser.exists():
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: would have performed a delete.'}
+
+ api_rval = policyuser.perform()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ results = policyuser.exists()
+ if isinstance(results, dict) and 'returncode' in results and results['returncode'] != 0:
+ return {'failed': True, 'msg': results}
+
+ if not results:
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: would have performed a create.'}
+
+ api_rval = policyuser.perform()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ return {'changed': False, 'state': 'present'}
+
+ return {'failed': True, 'changed': False, 'results': 'Unknown state passed. %s' % state, 'state': 'unknown'}
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
index eb78667ca..25519c9c9 100644
--- a/roles/lib_openshift/src/class/oc_adm_registry.py
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -87,8 +87,8 @@ class Registry(OpenShiftCLI):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
- if not results:
- raise RegistryException('Could not perform registry preparation.')
+ if not results or ('returncode' in results and results['returncode'] != 0):
+ raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@@ -109,7 +109,7 @@ class Registry(OpenShiftCLI):
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
- self.service = Yedit(content=result['results'][0])
+ self.service = Service(result['results'][0])
if result['returncode'] != 0:
rval = result['returncode']
@@ -119,8 +119,7 @@ class Registry(OpenShiftCLI):
def exists(self):
'''does the object exist?'''
- self.get()
- if self.deploymentconfig or self.service:
+ if self.deploymentconfig and self.service:
return True
return False
@@ -146,7 +145,7 @@ class Registry(OpenShiftCLI):
''' prepare a registry for instantiation '''
options = self.config.to_option_list()
- cmd = ['registry', '-n', self.config.namespace]
+ cmd = ['registry']
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
@@ -154,8 +153,8 @@ class Registry(OpenShiftCLI):
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
- if results['returncode'] != 0 and 'items' in results['results']:
- return results
+ if results['returncode'] != 0 and 'items' not in results['results']:
+ raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
@@ -179,6 +178,10 @@ class Registry(OpenShiftCLI):
if self.portal_ip:
service.put('spec.portalIP', self.portal_ip)
+ # the dry-run doesn't apply the selector correctly
+ if self.service:
+ service.put('spec.selector', self.service.get_selector())
+
# need to create the service and the deploymentconfig
service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
@@ -193,8 +196,20 @@ class Registry(OpenShiftCLI):
def create(self):
'''Create a registry'''
results = []
- for config_file in ['deployment_file', 'service_file']:
- results.append(self._create(self.prepared_registry[config_file]))
+ self.needs_update()
+ # if the object is none, then we need to create it
+ # if the object needs an update, then we should call replace
+ # Handle the deploymentconfig
+ if self.deploymentconfig is None:
+ results.append(self._create(self.prepared_registry['deployment_file']))
+ elif self.prepared_registry['deployment_update']:
+ results.append(self._replace(self.prepared_registry['deployment_file']))
+
+ # Handle the service
+ if self.service is None:
+ results.append(self._create(self.prepared_registry['service_file']))
+ elif self.prepared_registry['service_update']:
+ results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
@@ -206,7 +221,7 @@ class Registry(OpenShiftCLI):
return {'returncode': rval, 'results': results}
def update(self):
- '''run update for the registry. This performs a delete and then create '''
+ '''run update for the registry. This performs a replace if required'''
# Store the current service IP
if self.service:
svcip = self.service.get('spec.clusterIP')
@@ -232,6 +247,14 @@ class Registry(OpenShiftCLI):
def add_modifications(self, deploymentconfig):
''' update a deployment config with changes '''
+ # The REGISTRY_HTTP_SECRET environment variable is autogenerated on every
+ # dry run, so carry the value from the in-memory deploymentconfig over to
+ # the generated one; the modifications below will overwrite it if needed
+ if self.deploymentconfig:
+ result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
+ if result:
+ deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
+
# Currently we know that our deployment of a registry requires a few extra modifications
# Modification 1
# we need specific environment variables to be set
@@ -272,14 +295,12 @@ class Registry(OpenShiftCLI):
def needs_update(self):
''' check to see if we need to update '''
- if not self.service or not self.deploymentconfig:
- return True
-
exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
- if not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
- self.service.yaml_dict,
- exclude_list,
- debug=self.verbose):
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
+ self.service.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
self.prepared_registry['service_update'] = True
exclude_list = ['dnsPolicy',
@@ -295,10 +316,11 @@ class Registry(OpenShiftCLI):
'activeDeadlineSeconds', # added in 1.5 for timeouts
]
- if not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
- self.deploymentconfig.yaml_dict,
- exclude_list,
- debug=self.verbose):
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
self.prepared_registry['deployment_update'] = True
return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py
index 66769e73b..356d06fdf 100644
--- a/roles/lib_openshift/src/class/oc_adm_router.py
+++ b/roles/lib_openshift/src/class/oc_adm_router.py
@@ -113,6 +113,21 @@ class Router(OpenShiftCLI):
''' setter for property rolebinding '''
self._rolebinding = config
+ def get_object_by_kind(self, kind):
+ '''return the cached object for the given kind'''
+ if re.match("^(dc|deploymentconfig)$", kind, flags=re.IGNORECASE):
+ return self.deploymentconfig
+ elif re.match("^(svc|service)$", kind, flags=re.IGNORECASE):
+ return self.service
+ elif re.match("^(sa|serviceaccount)$", kind, flags=re.IGNORECASE):
+ return self.serviceaccount
+ elif re.match("secret", kind, flags=re.IGNORECASE):
+ return self.secret
+ elif re.match("clusterrolebinding", kind, flags=re.IGNORECASE):
+ return self.rolebinding
+
+ return None
+
def get(self):
''' return the self.router_parts '''
self.service = None
@@ -209,7 +224,7 @@ class Router(OpenShiftCLI):
options = self.config.to_option_list()
- cmd = ['router', self.config.name, '-n', self.config.namespace]
+ cmd = ['router', self.config.name]
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
@@ -263,13 +278,19 @@ class Router(OpenShiftCLI):
- clusterrolebinding
'''
results = []
+ self.needs_update()
import time
# pylint: disable=maybe-no-member
- for _, oc_data in self.prepared_router.items():
+ for kind, oc_data in self.prepared_router.items():
if oc_data['obj'] is not None:
time.sleep(1)
- results.append(self._create(oc_data['path']))
+ if self.get_object_by_kind(kind) is None:
+ results.append(self._create(oc_data['path']))
+
+ elif oc_data['update']:
+ results.append(self._replace(oc_data['path']))
+
rval = 0
for result in results:
@@ -297,17 +318,15 @@ class Router(OpenShiftCLI):
# pylint: disable=too-many-return-statements,too-many-branches
def needs_update(self):
''' check to see if we need to update '''
- if not self.deploymentconfig or not self.service or not self.serviceaccount or not self.secret:
- return True
-
# ServiceAccount:
# Need to determine changes from the pregenerated ones from the original
# Since these are auto generated, we can skip
skip = ['secrets', 'imagePullSecrets']
- if not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
- self.serviceaccount.yaml_dict,
- skip_keys=skip,
- debug=self.verbose):
+ if self.serviceaccount is None or \
+ not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
+ self.serviceaccount.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
self.prepared_router['ServiceAccount']['update'] = True
# Secret:
@@ -316,10 +335,11 @@ class Router(OpenShiftCLI):
if not self.secret:
self.prepared_router['Secret']['update'] = True
- if not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
- self.secret.yaml_dict,
- skip_keys=skip,
- debug=self.verbose):
+ if self.secret is None or \
+ not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
+ self.secret.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
self.prepared_router['Secret']['update'] = True
# Service:
@@ -328,28 +348,30 @@ class Router(OpenShiftCLI):
port['protocol'] = 'TCP'
skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type']
- if not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
- self.service.yaml_dict,
- skip_keys=skip,
- debug=self.verbose):
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
+ self.service.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
self.prepared_router['Service']['update'] = True
# DeploymentConfig:
# Router needs some exceptions.
# We do not want to check the autogenerated password for stats admin
- if not self.config.config_options['stats_password']['value']:
- for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
- 'spec.template.spec.containers[0].env') or []):
- if env_var['name'] == 'STATS_PASSWORD':
- env_var['value'] = \
- self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
- break
-
- # dry-run doesn't add the protocol to the ports section. We will manually do that.
- for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
- 'spec.template.spec.containers[0].ports') or []):
- if not 'protocol' in port:
- port['protocol'] = 'TCP'
+ if self.deploymentconfig is not None:
+ if not self.config.config_options['stats_password']['value']:
+ for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].env') or []):
+ if env_var['name'] == 'STATS_PASSWORD':
+ env_var['value'] = \
+ self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
+ break
+
+ # dry-run doesn't add the protocol to the ports section. We will manually do that.
+ for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].ports') or []):
+                if 'protocol' not in port:
+ port['protocol'] = 'TCP'
# These are different when generating
skip = ['dnsPolicy',
@@ -360,10 +382,11 @@ class Router(OpenShiftCLI):
'defaultMode',
]
- if not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
- self.deploymentconfig.yaml_dict,
- skip_keys=skip,
- debug=self.verbose):
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
self.prepared_router['DeploymentConfig']['update'] = True
# Check if any of the parts need updating, if so, return True
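The reworked needs_update() above drops the early 'return True' in favor of per-object None guards: a missing live object simply marks its prepared counterpart for update instead of short-circuiting the comparison of the remaining parts. A minimal sketch of that guard pattern, with illustrative names not taken from the module:

# Sketch of the per-object guard pattern (illustrative names).
def mark_updates(prepared, existing):
    '''flag each prepared object whose live counterpart is missing or differs'''
    for kind, data in prepared.items():
        live = existing.get(kind)
        # treat a missing live object the same as a difference
        if live is None or live != data['obj']:
            data['update'] = True

prepared = {'Service': {'obj': {'spec': 1}, 'update': False}}
mark_updates(prepared, {'Service': None})
assert prepared['Service']['update'] is True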
diff --git a/roles/lib_openshift/src/class/oc_objectvalidator.py b/roles/lib_openshift/src/class/oc_objectvalidator.py
new file mode 100644
index 000000000..43f6cac67
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_objectvalidator.py
@@ -0,0 +1,86 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCObjectValidator(OpenShiftCLI):
+    ''' Class to validate OpenShift objects via the oc command line tools '''
+
+ def __init__(self, kubeconfig):
+ ''' Constructor for OCObjectValidator '''
+ # namespace has no meaning for object validation, hardcode to 'default'
+ super(OCObjectValidator, self).__init__('default', kubeconfig)
+
+ def get_invalid(self, kind, invalid_filter):
+ ''' return invalid object information '''
+
+ rval = self._get(kind)
+ if rval['returncode'] != 0:
+ return False, rval, []
+
+ return True, rval, list(filter(invalid_filter, rval['results'][0]['items'])) # wrap filter with list for py3
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion of this module
+ '''
+
+ objectvalidator = OCObjectValidator(params['kubeconfig'])
+ all_invalid = {}
+ failed = False
+
+ def _is_invalid_namespace(namespace):
+ # check if it uses a reserved name
+ name = namespace['metadata']['name']
+ if not any((name == 'kube',
+ name == 'openshift',
+ name.startswith('kube-'),
+ name.startswith('openshift-'),)):
+ return False
+
+ # determine if the namespace was created by a user
+ if 'annotations' not in namespace['metadata']:
+ return False
+ return 'openshift.io/requester' in namespace['metadata']['annotations']
+
+ checks = (
+ (
+ 'hostsubnet',
+ lambda x: x['metadata']['name'] != x['host'],
+ u'hostsubnets where metadata.name != host',
+ ),
+ (
+ 'netnamespace',
+ lambda x: x['metadata']['name'] != x['netname'],
+ u'netnamespaces where metadata.name != netname',
+ ),
+ (
+ 'namespace',
+ _is_invalid_namespace,
+ u'namespaces that use reserved names and were not created by infrastructure components',
+ ),
+ )
+
+ for resource, invalid_filter, invalid_msg in checks:
+ success, rval, invalid = objectvalidator.get_invalid(resource, invalid_filter)
+ if not success:
+ return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval}
+ if invalid:
+ failed = True
+ all_invalid[invalid_msg] = invalid
+
+ if failed:
+ return {
+ 'failed': True,
+ 'msg': (
+ "All objects are not valid. If you are a supported customer please contact "
+ "Red Hat Support providing the complete output above. If you are not a customer "
+ "please contact users@lists.openshift.redhat.com for assistance."
+ ),
+ 'state': 'list',
+ 'results': all_invalid
+ }
+
+ return {'msg': 'All objects are valid.'}
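Each entry in the checks tuple above pairs a resource kind with a predicate flagging invalid items, and get_invalid() runs the listed items through that predicate. A standalone sketch of the reserved-namespace predicate, run on sample data whose shape is assumed from the code above:

# Standalone sketch of the reserved-namespace predicate on sample data.
def is_invalid_namespace(namespace):
    name = namespace['metadata']['name']
    if not (name in ('kube', 'openshift') or
            name.startswith(('kube-', 'openshift-'))):
        return False
    # user-created namespaces carry the requester annotation
    annotations = namespace['metadata'].get('annotations', {})
    return 'openshift.io/requester' in annotations

ns = {'metadata': {'name': 'openshift-infra',
                   'annotations': {'openshift.io/requester': 'alice'}}}
print(is_invalid_namespace(ns))  # True: reserved name, created by a user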
diff --git a/roles/lib_openshift/src/class/oc_project.py b/roles/lib_openshift/src/class/oc_project.py
new file mode 100644
index 000000000..5f02957b7
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_project.py
@@ -0,0 +1,189 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCProject(OpenShiftCLI):
+ ''' Project Class to manage project/namespace objects'''
+ kind = 'namespace'
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCProject '''
+ super(OCProject, self).__init__(None, config.kubeconfig)
+ self.config = config
+ self._project = None
+
+ @property
+ def project(self):
+ ''' property for project'''
+ if not self._project:
+ self.get()
+ return self._project
+
+ @project.setter
+ def project(self, data):
+        ''' setter function for project property'''
+ self._project = data
+
+ def exists(self):
+ ''' return whether a project exists '''
+ if self.project:
+ return True
+
+ return False
+
+ def get(self):
+ '''return project '''
+ result = self._get(self.kind, self.config.name)
+
+ if result['returncode'] == 0:
+ self.project = Project(content=result['results'][0])
+ result['results'] = self.project.yaml_dict
+
+ elif 'namespaces "%s" not found' % self.config.name in result['stderr']:
+ result = {'results': [], 'returncode': 0}
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create a project '''
+ cmd = ['new-project', self.config.name]
+ cmd.extend(self.config.to_option_list())
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ def update(self):
+ '''update a project '''
+
+ if self.config.config_options['display_name']['value'] is not None:
+ self.project.update_annotation('display-name', self.config.config_options['display_name']['value'])
+
+ if self.config.config_options['description']['value'] is not None:
+ self.project.update_annotation('description', self.config.config_options['description']['value'])
+
+ # work around for immutable project field
+ if self.config.config_options['node_selector']['value'] is not None:
+ self.project.update_annotation('node-selector', self.config.config_options['node_selector']['value'])
+
+ return self._replace_content(self.kind, self.config.name, self.project.yaml_dict)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ if self.config.config_options['display_name']['value'] is not None:
+ result = self.project.find_annotation("display-name")
+ if result != self.config.config_options['display_name']['value']:
+ return True
+
+ if self.config.config_options['description']['value'] is not None:
+ result = self.project.find_annotation("description")
+ if result != self.config.config_options['description']['value']:
+ return True
+
+ if self.config.config_options['node_selector']['value'] is not None:
+ result = self.project.find_annotation("node-selector")
+ if result != self.config.config_options['node_selector']['value']:
+ return True
+
+ return False
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ _ns = None
+ if params['node_selector'] is not None:
+ _ns = ','.join(params['node_selector'])
+
+ pconfig = ProjectConfig(params['name'],
+ 'None',
+ params['kubeconfig'],
+ {'admin': {'value': params['admin'], 'include': True},
+ 'admin_role': {'value': params['admin_role'], 'include': True},
+ 'description': {'value': params['description'], 'include': True},
+ 'display_name': {'value': params['display_name'], 'include': True},
+ 'node_selector': {'value': _ns, 'include': True},
+ })
+
+ oadm_project = OCProject(pconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oadm_project.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oadm_project.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oadm_project.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oadm_project.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oadm_project.create()
+
+ # return the created object
+ api_rval = oadm_project.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oadm_project.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = oadm_project.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oadm_project.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. [%s]' % state}
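run_ansible() above implements the usual list/absent/present state machine, with check mode returning before any mutating call and every mutation followed by a fresh get(). A compressed sketch of the present branch, assuming a duck-typed resource with the same exists/create/needs_update/update/get methods:

# Compressed sketch of the 'present' branch (duck-typed resource assumed).
def ensure_present(resource, check_mode):
    if not resource.exists():
        if check_mode:
            return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
        resource.create()
        return {'changed': True, 'results': resource.get()}
    if resource.needs_update():
        if check_mode:
            return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
        resource.update()
        return {'changed': True, 'results': resource.get()}
    return {'changed': False, 'results': resource.get()}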
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
index 448457292..3935525f1 100644
--- a/roles/lib_openshift/src/class/oc_route.py
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -55,8 +55,10 @@ class OCRoute(OpenShiftCLI):
def update(self):
'''update the object'''
- # need to update the tls information and the service name
- return self._replace_content(self.kind, self.config.name, self.config.data)
+ return self._replace_content(self.kind,
+ self.config.name,
+ self.config.data,
+ force=(self.config.host != self.route.get_host()))
def needs_update(self):
''' verify an update is needed '''
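The update() change above passes force only when the desired host differs from the live route's host, presumably because the host field cannot be replaced in place. A trivial sketch of that decision (names illustrative):

# Sketch: force a replace only when the host field differs.
def should_force(desired_host, current_host):
    '''a forced replace recreates the object, so request it only when required'''
    return desired_host != current_host

print(should_force('app.example.com', 'app.example.com'))  # False: in-place replace
print(should_force('new.example.com', 'app.example.com'))  # True: forced replace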
diff --git a/roles/lib_openshift/src/class/oc_service.py b/roles/lib_openshift/src/class/oc_service.py
index d4cc83a59..20cf23df5 100644
--- a/roles/lib_openshift/src/class/oc_service.py
+++ b/roles/lib_openshift/src/class/oc_service.py
@@ -22,7 +22,7 @@ class OCService(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
         ''' Constructor for OCService '''
- super(OCService, self).__init__(namespace, kubeconfig)
+ super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
cluster_ip, portal_ip, session_affinity, service_type)
@@ -93,7 +93,9 @@ class OCService(OpenShiftCLI):
params['portalip'],
params['ports'],
params['session_affinity'],
- params['service_type'])
+ params['service_type'],
+ params['kubeconfig'],
+ params['debug'])
state = params['state']
diff --git a/roles/lib_openshift/src/doc/atomic_container b/roles/lib_openshift/src/doc/atomic_container
new file mode 100644
index 000000000..53fc40f36
--- /dev/null
+++ b/roles/lib_openshift/src/doc/atomic_container
@@ -0,0 +1,36 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_atomic_container
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform
+ - Allows executing commands on the container images
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ name:
+ description:
+ - Name of the container
+ required: True
+ default: null
+ image:
+ description:
+ - The image to use to install the container
+ required: True
+ default: null
+ state:
+ description:
+ - State of the container
+ required: True
+ choices: ["latest", "absent", "latest", "rollback"]
+ default: "latest"
+ values:
+ description:
+ - Values for the installation of the container
+ required: False
+    default: null
+'''
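The DOCUMENTATION block above defines the module's four options. For orientation, a hypothetical params dict of the kind a run_ansible-style entry point would receive; every value here is invented for illustration:

# Hypothetical oc_atomic_container params (all values invented).
params = {
    'name': 'router',                              # container name
    'image': 'openshift3/ose:v3.5',                # image to install
    'state': 'latest',                             # latest, absent, or rollback
    'values': ['OPTIONS=--log-driver json-file'],  # extra installation values
}
print(params['state'])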
diff --git a/roles/lib_openshift/src/doc/manage_node b/roles/lib_openshift/src/doc/manage_node
index 382377f3e..b651ea4e7 100644
--- a/roles/lib_openshift/src/doc/manage_node
+++ b/roles/lib_openshift/src/doc/manage_node
@@ -3,7 +3,7 @@
DOCUMENTATION = '''
---
-module: oadm_manage_node
+module: oc_adm_manage_node
short_description: Module to manage openshift nodes
description:
- Manage openshift nodes programmatically.
@@ -75,13 +75,13 @@ extends_documentation_fragment: []
EXAMPLES = '''
- name: oadm manage-node --schedulable=true --selector=ops_node=new
- oadm_manage_node:
+ oc_adm_manage_node:
selector: ops_node=new
schedulable: True
register: schedout
- name: oadm manage-node my-k8s-node-5 --evacuate
- oadm_manage_node:
+ oc_adm_manage_node:
node: my-k8s-node-5
evacuate: True
force: True
diff --git a/roles/lib_openshift/src/doc/objectvalidator b/roles/lib_openshift/src/doc/objectvalidator
new file mode 100644
index 000000000..98861e261
--- /dev/null
+++ b/roles/lib_openshift/src/doc/objectvalidator
@@ -0,0 +1,27 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_objectvalidator
+short_description: Validate OpenShift objects
+description:
+ - Validate OpenShift objects
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+author:
+- "Mo Khan <monis@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: run oc_objectvalidator
+ oc_objectvalidator:
+ register: oc_objectvalidator
+'''
diff --git a/roles/lib_openshift/src/doc/policy_group b/roles/lib_openshift/src/doc/policy_group
new file mode 100644
index 000000000..343413269
--- /dev/null
+++ b/roles/lib_openshift/src/doc/policy_group
@@ -0,0 +1,74 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_adm_policy_group
+short_description: Module to manage openshift policy for groups
+description:
+ - Manage openshift policy for groups.
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ namespace:
+ description:
+ - The namespace scope
+ required: false
+ default: None
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ group:
+ description:
+ - The name of the group
+ required: true
+ default: None
+ aliases: []
+ resource_kind:
+ description:
+ - The kind of policy to affect
+ required: true
+ default: None
+ choices: ["role", "cluster-role", "scc"]
+ aliases: []
+ resource_name:
+ description:
+ - The name of the policy
+ required: true
+ default: None
+ aliases: []
+ state:
+ description:
+ - Desired state of the policy
+ required: true
+ default: present
+ choices: ["present", "absent"]
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: oc adm policy remove-scc-from-group an-scc agroup
+ oc_adm_policy_group:
+ group: agroup
+ resource_kind: scc
+ resource_name: an-scc
+ state: absent
+
+- name: oc adm policy add-cluster-role-to-group system:build-strategy-docker agroup
+ oc_adm_policy_group:
+ group: agroup
+ resource_kind: cluster-role
+ resource_name: system:build-strategy-docker
+ state: present
+'''
diff --git a/roles/lib_openshift/src/doc/policy_user b/roles/lib_openshift/src/doc/policy_user
new file mode 100644
index 000000000..351c9af65
--- /dev/null
+++ b/roles/lib_openshift/src/doc/policy_user
@@ -0,0 +1,74 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_adm_policy_user
+short_description: Module to manage openshift policy for users
+description:
+ - Manage openshift policy for users.
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ namespace:
+ description:
+ - The namespace scope
+ required: false
+ default: None
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ user:
+ description:
+ - The name of the user
+ required: true
+ default: None
+ aliases: []
+ resource_kind:
+ description:
+ - The kind of policy to affect
+ required: true
+ default: None
+ choices: ["role", "cluster-role", "scc"]
+ aliases: []
+ resource_name:
+ description:
+ - The name of the policy
+ required: true
+ default: None
+ aliases: []
+ state:
+ description:
+ - Desired state of the policy
+ required: true
+ default: present
+ choices: ["present", "absent"]
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: oc adm policy remove-scc-from-user an-scc ausername
+ oc_adm_policy_user:
+ user: ausername
+ resource_kind: scc
+ resource_name: an-scc
+ state: absent
+
+- name: oc adm policy add-cluster-role-to-user system:build-strategy-docker ausername
+ oc_adm_policy_user:
+ user: ausername
+ resource_kind: cluster-role
+ resource_name: system:build-strategy-docker
+ state: present
+'''
diff --git a/roles/lib_openshift/src/doc/project b/roles/lib_openshift/src/doc/project
new file mode 100644
index 000000000..92efe4320
--- /dev/null
+++ b/roles/lib_openshift/src/doc/project
@@ -0,0 +1,81 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_project
+short_description: Module to manage openshift projects
+description:
+ - Manage openshift projects programmatically.
+options:
+ state:
+ description:
+ - If present, the project will be created if it doesn't exist or update if different. If absent, the project will be removed if present. If list, information about the project will be gathered and returned as part of the Ansible call results.
+ required: false
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ display_name:
+ description:
+ - The display name attribute for a project
+ required: false
+ default: None
+ aliases: []
+ description:
+ description:
+ - The description attribute for a project
+ required: false
+ default: None
+ aliases: []
+ admin:
+ description:
+ - The project admin username
+ required: false
+ default: false
+ aliases: []
+ admin_role:
+ description:
+    - The role to assign to the project admin
+ required: false
+ default: 'admin'
+ aliases: []
+ node_selector:
+ description:
+ - The node selector for this project.
+ - This allows certain pods in this project to run on certain nodes.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create a project
+ oc_project:
+ state: present
+ name: openshift-ops
+ display_name: operations team project
+ node_selector:
+ - top=secret
+ - noncustomer=True
+'''
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 2e822d8ef..132c586c9 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -255,13 +255,13 @@ class OpenShiftCLI(object):
if oadm:
cmds.append('adm')
+ cmds.extend(cmd)
+
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -283,9 +283,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -523,8 +523,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -544,8 +544,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -591,10 +591,11 @@ class OpenShiftCLIConfig(object):
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
- for key, data in self.config_options.items():
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
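Sorting the option keys in stringify() makes the generated CLI arguments deterministic across runs (plain dict iteration order was arbitrary before Python 3.7), which keeps command comparison and test assertions stable. A standalone sketch of the rewritten method:

# Standalone sketch of the deterministic stringify above.
def stringify(config_options):
    rval = []
    for key in sorted(config_options.keys()):
        data = config_options[key]
        # emit included options whose value is truthy or an int (so 0 survives)
        if data['include'] and (data['value'] or isinstance(data['value'], int)):
            rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
    return rval

opts = {'service_account': {'value': 'router', 'include': True},
        'replicas': {'value': 2, 'include': True}}
print(stringify(opts))  # ['--replicas=2', '--service-account=router']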
diff --git a/roles/lib_openshift/src/lib/deploymentconfig.py b/roles/lib_openshift/src/lib/deploymentconfig.py
index e37475ef5..0549bba84 100644
--- a/roles/lib_openshift/src/lib/deploymentconfig.py
+++ b/roles/lib_openshift/src/lib/deploymentconfig.py
@@ -105,6 +105,18 @@ spec:
return False
+ def get_env_var(self, key):
+        '''return the environment variable matching key, or None'''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
def get_env_vars(self):
        '''return all environment variables'''
return self.get(DeploymentConfig.env_path) or []
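get_env_var() above does a linear scan of the container's env list and returns the first entry whose name matches, or None. The same logic as a standalone snippet over the env-list shape used throughout this file:

# Standalone version of the get_env_var scan, with sample data.
def get_env_var(env_list, key):
    for env_var in env_list or []:
        if env_var['name'] == key:
            return env_var
    return None

env = [{'name': 'STATS_PORT', 'value': '1936'}]
print(get_env_var(env, 'STATS_PORT'))  # {'name': 'STATS_PORT', 'value': '1936'}
print(get_env_var(env, 'MISSING'))     # None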
diff --git a/roles/lib_openshift/src/lib/project.py b/roles/lib_openshift/src/lib/project.py
new file mode 100644
index 000000000..40994741c
--- /dev/null
+++ b/roles/lib_openshift/src/lib/project.py
@@ -0,0 +1,85 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class ProjectConfig(OpenShiftCLIConfig):
+ ''' project config object '''
+ def __init__(self, rname, namespace, kubeconfig, project_options):
+ super(ProjectConfig, self).__init__(rname, None, kubeconfig, project_options)
+
+
+class Project(Yedit):
+    ''' Class to model an OpenShift project object '''
+ annotations_path = "metadata.annotations"
+ kind = 'Project'
+ annotation_prefix = 'openshift.io/'
+
+ def __init__(self, content):
+ '''Project constructor'''
+ super(Project, self).__init__(content=content)
+
+ def get_annotations(self):
+ ''' return the annotations'''
+ return self.get(Project.annotations_path) or {}
+
+ def add_annotations(self, inc_annos):
+        ''' add annotations to the existing annotations'''
+ if not isinstance(inc_annos, list):
+ inc_annos = [inc_annos]
+
+ annos = self.get_annotations()
+ if not annos:
+ self.put(Project.annotations_path, inc_annos)
+ else:
+ for anno in inc_annos:
+ for key, value in anno.items():
+ annos[key] = value
+
+ return True
+
+ def find_annotation(self, key):
+ ''' find an annotation'''
+ annotations = self.get_annotations()
+ for anno in annotations:
+ if Project.annotation_prefix + key == anno:
+ return annotations[anno]
+
+ return None
+
+ def delete_annotation(self, inc_anno_keys):
+ ''' remove an annotation from a project'''
+ if not isinstance(inc_anno_keys, list):
+ inc_anno_keys = [inc_anno_keys]
+
+ annos = self.get(Project.annotations_path) or {}
+
+ if not annos:
+ return True
+
+ removed = False
+ for inc_anno in inc_anno_keys:
+ anno = self.find_annotation(inc_anno)
+            if anno is not None:
+                # find_annotation returns the value, so delete by the prefixed key
+                del annos[Project.annotation_prefix + inc_anno]
+ removed = True
+
+ return removed
+
+ def update_annotation(self, key, value):
+        ''' update (or add) an annotation on a project'''
+ annos = self.get(Project.annotations_path) or {}
+
+ if not annos:
+ return True
+
+ updated = False
+ anno = self.find_annotation(key)
+ if anno:
+ annos[Project.annotation_prefix + key] = value
+ updated = True
+
+ else:
+ self.add_annotations({Project.annotation_prefix + key: value})
+
+ return updated
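All of the Project annotation helpers above key off the 'openshift.io/' prefix, so callers pass bare names such as 'display-name' and the class expands them. A standalone sketch of the lookup against a plain dict standing in for metadata.annotations:

# Prefix-expanded annotation lookup (plain dict stand-in for metadata.annotations).
PREFIX = 'openshift.io/'

def find_annotation(annos, key):
    return annos.get(PREFIX + key)

annos = {'openshift.io/display-name': 'operations project'}
print(find_annotation(annos, 'display-name'))  # 'operations project'
print(find_annotation(annos, 'description'))   # None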
diff --git a/roles/lib_openshift/src/lib/scc.py b/roles/lib_openshift/src/lib/scc.py
new file mode 100644
index 000000000..3e2aa08d7
--- /dev/null
+++ b/roles/lib_openshift/src/lib/scc.py
@@ -0,0 +1,218 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class SecurityContextConstraintsConfig(object):
+ ''' Handle scc options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ kubeconfig,
+ options=None,
+ fs_group='MustRunAs',
+ default_add_capabilities=None,
+ groups=None,
+ priority=None,
+ required_drop_capabilities=None,
+ run_as_user='MustRunAsRange',
+ se_linux_context='MustRunAs',
+ supplemental_groups='RunAsAny',
+ users=None,
+ annotations=None):
+ ''' constructor for handling scc options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.options = options
+ self.fs_group = fs_group
+ self.default_add_capabilities = default_add_capabilities
+ self.groups = groups
+ self.priority = priority
+ self.required_drop_capabilities = required_drop_capabilities
+ self.run_as_user = run_as_user
+ self.se_linux_context = se_linux_context
+ self.supplemental_groups = supplemental_groups
+ self.users = users
+ self.annotations = annotations
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' assign the correct properties for a scc dict '''
+ # allow options
+ if self.options:
+ for key, value in self.options.items():
+ self.data[key] = value
+ else:
+ self.data['allowHostDirVolumePlugin'] = False
+ self.data['allowHostIPC'] = False
+ self.data['allowHostNetwork'] = False
+ self.data['allowHostPID'] = False
+ self.data['allowHostPorts'] = False
+ self.data['allowPrivilegedContainer'] = False
+ self.data['allowedCapabilities'] = None
+
+ # version
+ self.data['apiVersion'] = 'v1'
+ # kind
+ self.data['kind'] = 'SecurityContextConstraints'
+ # defaultAddCapabilities
+ self.data['defaultAddCapabilities'] = self.default_add_capabilities
+ # fsGroup
+        self.data['fsGroup'] = {'type': self.fs_group}
+ # groups
+ self.data['groups'] = []
+ if self.groups:
+ self.data['groups'] = self.groups
+ # metadata
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ if self.annotations:
+ for key, value in self.annotations.items():
+ self.data['metadata'][key] = value
+ # priority
+ self.data['priority'] = self.priority
+ # requiredDropCapabilities
+ self.data['requiredDropCapabilities'] = self.required_drop_capabilities
+ # runAsUser
+ self.data['runAsUser'] = {'type': self.run_as_user}
+ # seLinuxContext
+ self.data['seLinuxContext'] = {'type': self.se_linux_context}
+ # supplementalGroups
+ self.data['supplementalGroups'] = {'type': self.supplemental_groups}
+ # users
+ self.data['users'] = []
+ if self.users:
+ self.data['users'] = self.users
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods,no-member
+class SecurityContextConstraints(Yedit):
+    ''' Class to model a SecurityContextConstraints object '''
+ default_add_capabilities_path = "defaultAddCapabilities"
+ fs_group_path = "fsGroup"
+ groups_path = "groups"
+ priority_path = "priority"
+ required_drop_capabilities_path = "requiredDropCapabilities"
+ run_as_user_path = "runAsUser"
+ se_linux_context_path = "seLinuxContext"
+ supplemental_groups_path = "supplementalGroups"
+ users_path = "users"
+ kind = 'SecurityContextConstraints'
+
+ def __init__(self, content):
+ '''SecurityContextConstraints constructor'''
+ super(SecurityContextConstraints, self).__init__(content=content)
+ self._users = None
+ self._groups = None
+
+ @property
+ def users(self):
+ ''' users property getter '''
+ if self._users is None:
+ self._users = self.get_users()
+ return self._users
+
+ @property
+ def groups(self):
+ ''' groups property getter '''
+ if self._groups is None:
+ self._groups = self.get_groups()
+ return self._groups
+
+ @users.setter
+ def users(self, data):
+ ''' users property setter'''
+ self._users = data
+
+ @groups.setter
+ def groups(self, data):
+ ''' groups property setter'''
+ self._groups = data
+
+ def get_users(self):
+ '''get scc users'''
+ return self.get(SecurityContextConstraints.users_path) or []
+
+ def get_groups(self):
+ '''get scc groups'''
+ return self.get(SecurityContextConstraints.groups_path) or []
+
+ def add_user(self, inc_user):
+ ''' add a user '''
+ if self.users:
+ self.users.append(inc_user)
+ else:
+ self.put(SecurityContextConstraints.users_path, [inc_user])
+
+ return True
+
+ def add_group(self, inc_group):
+ ''' add a group '''
+ if self.groups:
+ self.groups.append(inc_group)
+ else:
+ self.put(SecurityContextConstraints.groups_path, [inc_group])
+
+ return True
+
+ def remove_user(self, inc_user):
+ ''' remove a user '''
+ try:
+ self.users.remove(inc_user)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_group(self, inc_group):
+ ''' remove a group '''
+ try:
+ self.groups.remove(inc_group)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def update_user(self, inc_user):
+ ''' update a user '''
+ try:
+ index = self.users.index(inc_user)
+ except ValueError as _:
+ return self.add_user(inc_user)
+
+ self.users[index] = inc_user
+
+ return True
+
+ def update_group(self, inc_group):
+ ''' update a group '''
+ try:
+ index = self.groups.index(inc_group)
+ except ValueError as _:
+ return self.add_group(inc_group)
+
+ self.groups[index] = inc_group
+
+ return True
+
+ def find_user(self, inc_user):
+ ''' find a user '''
+ index = None
+ try:
+ index = self.users.index(inc_user)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_group(self, inc_group):
+ ''' find a group '''
+ index = None
+ try:
+ index = self.groups.index(inc_group)
+ except ValueError as _:
+ return index
+
+ return index
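The user and group helpers above all reduce to list membership on the SCC's users/groups arrays, with update_* falling back to add_* when the entry is absent. That fallback as a standalone sketch on a plain list:

# Update-or-add fallback on a plain list (stand-in for the Yedit-backed property).
def update_user(users, inc_user):
    try:
        users[users.index(inc_user)] = inc_user  # present: replace in place
    except ValueError:
        users.append(inc_user)                   # absent: fall back to add
    return True

users = ['system:serviceaccount:default:router']
update_user(users, 'alice')
print(users)  # ['system:serviceaccount:default:router', 'alice']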
diff --git a/roles/lib_openshift/src/lib/service.py b/roles/lib_openshift/src/lib/service.py
index ffe27da47..eef568779 100644
--- a/roles/lib_openshift/src/lib/service.py
+++ b/roles/lib_openshift/src/lib/service.py
@@ -67,6 +67,7 @@ class Service(Yedit):
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
+ selector_path = 'spec.selector'
kind = 'Service'
def __init__(self, content):
@@ -77,6 +78,10 @@ class Service(Yedit):
''' get a list of ports '''
return self.get(Service.port_path) or []
+ def get_selector(self):
+ ''' get the service selector'''
+ return self.get(Service.selector_path) or {}
+
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
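get_selector() follows the same pattern as get_ports(): read a fixed dotted path and fall back to an empty container so callers never have to None-check. A small sketch of that path-with-default idiom, assuming only plain nested dicts rather than Yedit:

# Dotted-path lookup with an empty default (plain dicts, not Yedit).
def get_path(doc, path, default):
    cur = doc
    for part in path.split('.'):
        if not isinstance(cur, dict) or part not in cur:
            return default
        cur = cur[part]
    return cur or default

svc = {'spec': {'selector': {'router': 'router'}}}
print(get_path(svc, 'spec.selector', {}))  # {'router': 'router'}
print(get_path({}, 'spec.selector', {}))   # {}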
diff --git a/roles/lib_openshift/src/lib/volume.py b/roles/lib_openshift/src/lib/volume.py
index 84ef1f705..e0abb1d1b 100644
--- a/roles/lib_openshift/src/lib/volume.py
+++ b/roles/lib_openshift/src/lib/volume.py
@@ -17,20 +17,21 @@ class Volume(object):
''' return a properly structured volume '''
volume_mount = None
volume = {'name': volume_info['name']}
- if volume_info['type'] == 'secret':
+ volume_type = volume_info['type'].lower()
+ if volume_type == 'secret':
volume['secret'] = {}
volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
- elif volume_info['type'] == 'emptydir':
+ elif volume_type == 'emptydir':
volume['emptyDir'] = {}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
- elif volume_info['type'] == 'pvc':
+ elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
volume['persistentVolumeClaim'] = {}
volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
- elif volume_info['type'] == 'hostpath':
+ elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
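Lower-casing volume_info['type'] above makes the dispatch case-insensitive and lets 'pvc' and 'persistentvolumeclaim' share a branch. A hedged sketch of a fully normalized dispatch that also keys the secret entry on the normalized type, covering the same cases:

# Case-insensitive volume dispatch keyed on the normalized type (sketch).
def build_volume(volume_info):
    volume = {'name': volume_info['name']}
    volume_type = volume_info['type'].lower()
    if volume_type == 'secret':
        volume['secret'] = {'secretName': volume_info['secret_name']}
    elif volume_type == 'emptydir':
        volume['emptyDir'] = {}
    elif volume_type in ('pvc', 'persistentvolumeclaim'):
        volume['persistentVolumeClaim'] = {'claimName': volume_info['claimName']}
    elif volume_type == 'hostpath':
        volume['hostPath'] = {'path': volume_info['path']}
    return volume

print(build_volume({'name': 'certs', 'type': 'Secret', 'secret_name': 'router-certs'}))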
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index 35f8d71b2..44a1524b0 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -9,15 +9,39 @@ oc_adm_ca_server_cert.py:
- class/oc_adm_ca_server_cert.py
- ansible/oc_adm_ca_server_cert.py
-oadm_manage_node.py:
+oc_adm_manage_node.py:
- doc/generated
- doc/license
- lib/import.py
- doc/manage_node
- ../../lib_utils/src/class/yedit.py
- lib/base.py
-- class/oadm_manage_node.py
-- ansible/oadm_manage_node.py
+- class/oc_adm_manage_node.py
+- ansible/oc_adm_manage_node.py
+
+oc_adm_policy_user.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/policy_user
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/rolebinding.py
+- lib/scc.py
+- class/oc_adm_policy_user.py
+- ansible/oc_adm_policy_user.py
+
+oc_adm_policy_group.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/policy_group
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/rolebinding.py
+- lib/scc.py
+- class/oc_adm_policy_group.py
+- ansible/oc_adm_policy_group.py
oc_adm_registry.py:
- doc/generated
@@ -49,6 +73,12 @@ oc_adm_router.py:
- class/oc_adm_router.py
- ansible/oc_adm_router.py
+oc_atomic_container.py:
+- doc/generated
+- doc/license
+- doc/atomic_container
+- ansible/oc_atomic_container.py
+
oc_edit.py:
- doc/generated
- doc/license
@@ -100,6 +130,17 @@ oc_process.py:
- class/oc_process.py
- ansible/oc_process.py
+oc_project.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/project
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/project.py
+- class/oc_project.py
+- ansible/oc_project.py
+
oc_route.py:
- doc/generated
- doc/license
@@ -176,3 +217,13 @@ oc_version.py:
- lib/base.py
- class/oc_version.py
- ansible/oc_version.py
+
+oc_objectvalidator.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/objectvalidator
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_objectvalidator.py
+- ansible/oc_objectvalidator.py
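sources.yml maps each generated library module to the ordered fragments it is concatenated from; the new policy modules pull in the shared rolebinding.py and scc.py fragments. A hedged sketch of the kind of generator that could consume this map (paths and layout assumed; the repository's actual generator is not shown here):

# Hedged sketch: concatenate fragments per a sources.yml-style map.
import os
import yaml  # PyYAML assumed available

def generate(sources_path, src_dir, out_dir):
    with open(sources_path) as stream:
        sources = yaml.safe_load(stream)
    for module, fragments in sources.items():
        parts = []
        for frag in fragments:
            with open(os.path.join(src_dir, frag)) as fragment:
                parts.append(fragment.read())
        with open(os.path.join(out_dir, module), 'w') as out:
            out.write(''.join(parts))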
diff --git a/roles/lib_openshift/src/test/integration/oadm_manage_node.yml b/roles/lib_openshift/src/test/integration/oc_adm_manage_node.yml
index 3ee13a409..1ed2ef11b 100755
--- a/roles/lib_openshift/src/test/integration/oadm_manage_node.yml
+++ b/roles/lib_openshift/src/test/integration/oc_adm_manage_node.yml
@@ -1,6 +1,6 @@
#!/usr/bin/ansible-playbook --module-path=../../../library/
#
-# ./oadm_manage_node.yml -e "cli_master_test=$OPENSHIFT_MASTER
+# ./oc_adm_manage_node.yml -e "cli_master_test=$OPENSHIFT_MASTER"
---
- hosts: "{{ cli_master_test }}"
gather_facts: no
@@ -17,7 +17,7 @@
node_to_test: "{{ obj_out['results']['results'][0]['items'][0]['metadata']['name'] }}"
- name: list pods from a node
- oadm_manage_node:
+ oc_adm_manage_node:
list_pods: True
node:
- "{{ node_to_test }}"
@@ -29,7 +29,7 @@
msg: Pod data was not returned
- name: set node to unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
schedulable: False
node:
- "{{ node_to_test }}"
@@ -56,7 +56,7 @@
that: nodeout.results.results[0]['spec']['unschedulable']
- name: set node to schedulable
- oadm_manage_node:
+ oc_adm_manage_node:
schedulable: True
node:
- "{{ node_to_test }}"
diff --git a/roles/lib_openshift/src/test/integration/oc_project.yml b/roles/lib_openshift/src/test/integration/oc_project.yml
new file mode 100755
index 000000000..9f700c62c
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_project.yml
@@ -0,0 +1,83 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_project.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create a project
+ oc_project:
+ display_name: operations project
+ name: operations
+ state: present
+ description: All things operations
+ node_selector:
+ - ops_only=true
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - "projout.results.results['metadata']['name'] == 'operations'"
+ - projout.changed
+ msg: project create failed.
+
+ - name: create a project
+ oc_project:
+ display_name: operations project
+ name: operations
+ state: present
+ description: All things operations
+ node_selector:
+ - ops_only=true
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - "projout.results.results['metadata']['name'] == 'operations'"
+ - projout.changed == False
+      msg: project create was not idempotent.
+
+ - name: update a project
+ oc_project:
+ display_name: operations project one
+ name: operations
+ state: present
+ description: All things operations
+ node_selector:
+ - ops_only=true
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - "projout.results.results['metadata']['annotations']['openshift.io/display-name'] == 'operations project one'"
+ - projout.changed == True
+      msg: project update failed.
+
+ - name: update a project
+ oc_project:
+ name: operations
+ state: list
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - "projout.results['metadata']['annotations']['openshift.io/display-name'] == 'operations project one'"
+ - projout.changed == False
+ - projout.state == 'list'
+ msg: project list failed.
+
+ - name: delete a project
+ oc_project:
+ name: operations
+ state: absent
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - projout.changed == True
+ msg: project delete failed.
diff --git a/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py b/roles/lib_openshift/src/test/unit/test_oc_adm_manage_node.py
index 761c849fb..312b1ecbb 100755
--- a/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_manage_node.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
- Unit tests for oadm_manage_node
+ Unit tests for oc_adm_manage_node
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 2 tests in 0.001s
-#
-# OK
import os
import six
@@ -24,20 +16,16 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oadm_manage_node import ManageNode, locate_oc_binary # noqa: E402
+from oc_adm_manage_node import ManageNode, locate_oc_binary # noqa: E402
class ManageNodeTest(unittest.TestCase):
'''
- Test class for oadm_manage_node
+ Test class for oc_adm_manage_node
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
- @mock.patch('oadm_manage_node.Utils.create_tmpfile_copy')
- @mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
+ @mock.patch('oc_adm_manage_node.Utils.create_tmpfile_copy')
+ @mock.patch('oc_adm_manage_node.ManageNode.openshift_cmd')
def test_list_pods(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a get '''
params = {'node': ['ip-172-31-49-140.ec2.internal'],
@@ -119,8 +107,8 @@ class ManageNodeTest(unittest.TestCase):
# returned 2 pods
self.assertTrue(len(results['results']['nodes']['ip-172-31-49-140.ec2.internal']) == 2)
- @mock.patch('oadm_manage_node.Utils.create_tmpfile_copy')
- @mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
+ @mock.patch('oc_adm_manage_node.Utils.create_tmpfile_copy')
+ @mock.patch('oc_adm_manage_node.ManageNode.openshift_cmd')
def test_schedulable_false(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a get '''
params = {'node': ['ip-172-31-49-140.ec2.internal'],
@@ -287,11 +275,3 @@ class ManageNodeTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
new file mode 100755
index 000000000..bab36fddc
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
@@ -0,0 +1,369 @@
+#!/usr/bin/env python
+'''
+ Unit tests for oc adm registry
+'''
+
+import os
+import six
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_adm_registry import Registry, locate_oc_binary # noqa: E402
+
+
+# pylint: disable=too-many-public-methods
+class RegistryTest(unittest.TestCase):
+ '''
+ Test class for Registry
+ '''
+ dry_run = '''{
+ "kind": "List",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "registry",
+ "creationTimestamp": null
+ }
+ },
+ {
+ "kind": "ClusterRoleBinding",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "registry-registry-role",
+ "creationTimestamp": null
+ },
+ "userNames": [
+ "system:serviceaccount:default:registry"
+ ],
+ "groupNames": null,
+ "subjects": [
+ {
+ "kind": "ServiceAccount",
+ "namespace": "default",
+ "name": "registry"
+ }
+ ],
+ "roleRef": {
+ "kind": "ClusterRole",
+ "name": "system:registry"
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "docker-registry",
+ "creationTimestamp": null,
+ "labels": {
+ "docker-registry": "default"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "test": false,
+ "selector": {
+ "docker-registry": "default"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "docker-registry": "default"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "registry-storage",
+ "emptyDir": {}
+ }
+ ],
+ "containers": [
+ {
+ "name": "registry",
+ "image": "openshift3/ose-docker-registry:v3.5.0.39",
+ "ports": [
+ {
+ "containerPort": 5000
+ }
+ ],
+ "env": [
+ {
+ "name": "REGISTRY_HTTP_ADDR",
+ "value": ":5000"
+ },
+ {
+ "name": "REGISTRY_HTTP_NET",
+ "value": "tcp"
+ },
+ {
+ "name": "REGISTRY_HTTP_SECRET",
+ "value": "WQjSGeUu5KFZRTwGeIXgwIjyraNDLmdJblsFbtzZdF8="
+ },
+ {
+ "name": "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ENFORCEQUOTA",
+ "value": "false"
+ }
+ ],
+ "resources": {
+ "requests": {
+ "cpu": "100m",
+ "memory": "256Mi"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "registry-storage",
+ "mountPath": "/registry"
+ }
+ ],
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/healthz",
+ "port": 5000
+ },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 5
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/healthz",
+ "port": 5000
+ },
+ "timeoutSeconds": 5
+ },
+ "securityContext": {
+ "privileged": false
+ }
+ }
+ ],
+ "nodeSelector": {
+ "type": "infra"
+ },
+ "serviceAccountName": "registry",
+ "serviceAccount": "registry"
+ }
+ }
+ },
+ "status": {
+ "latestVersion": 0,
+ "observedGeneration": 0,
+ "replicas": 0,
+ "updatedReplicas": 0,
+ "availableReplicas": 0,
+ "unavailableReplicas": 0
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "docker-registry",
+ "creationTimestamp": null,
+ "labels": {
+ "docker-registry": "default"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "5000-tcp",
+ "port": 5000,
+ "targetPort": 5000
+ }
+ ],
+ "selector": {
+ "docker-registry": "default"
+ },
+ "clusterIP": "172.30.119.110",
+ "sessionAffinity": "ClientIP"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }
+ ]}'''
+
+ @mock.patch('oc_adm_registry.Utils._write')
+ @mock.patch('oc_adm_registry.Utils.create_tmpfile_copy')
+ @mock.patch('oc_adm_registry.Registry._run')
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ ''' Testing state present '''
+ params = {'state': 'present',
+ 'debug': False,
+ 'namespace': 'default',
+ 'name': 'docker-registry',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'images': None,
+ 'latest_images': None,
+ 'labels': None,
+ 'ports': ['5000'],
+ 'replicas': 1,
+ 'selector': 'type=infra',
+ 'service_account': 'registry',
+ 'mount_host': None,
+ 'volume_mounts': None,
+ 'env_vars': {},
+ 'enforce_quota': False,
+ 'force': False,
+ 'daemonset': False,
+ 'tls_key': None,
+ 'tls_certificate': None,
+ 'edits': []}
+
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server (NotFound): deploymentconfigs "docker-registry" not found'),
+ (1, '', 'Error from server (NotFound): service "docker-registry" not found'),
+ (0, RegistryTest.dry_run, ''),
+ (0, '', ''),
+ (0, '', ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = Registry.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ for result in results['results']['results']:
+ self.assertEqual(result['returncode'], 0)
+
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'adm', 'registry', '--daemonset=False', '--enforce-quota=False',
+ '--ports=5000', '--replicas=1', '--selector=type=infra',
+ '--service-account=registry', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), ])
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
new file mode 100755
index 000000000..51393dbaf
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
@@ -0,0 +1,474 @@
+#!/usr/bin/env python
+'''
+ Unit tests for oc adm router
+'''
+
+import os
+import six
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_adm_router import Router, locate_oc_binary # noqa: E402
+
+
+# pylint: disable=too-many-public-methods
+class RouterTest(unittest.TestCase):
+ '''
+ Test class for Router
+ '''
+ dry_run = '''{
+ "kind": "List",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "creationTimestamp": null
+ }
+ },
+ {
+ "kind": "ClusterRoleBinding",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router-router-role",
+ "creationTimestamp": null
+ },
+ "userNames": [
+ "system:serviceaccount:default:router"
+ ],
+ "groupNames": null,
+ "subjects": [
+ {
+ "kind": "ServiceAccount",
+ "namespace": "default",
+ "name": "router"
+ }
+ ],
+ "roleRef": {
+ "kind": "ClusterRole",
+ "name": "system:router"
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "creationTimestamp": null,
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxUnavailable": "25%",
+ "maxSurge": 0
+ },
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 2,
+ "test": false,
+ "selector": {
+ "router": "router"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "server-certificate",
+ "secret": {
+ "secretName": "router-certs"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "router",
+ "image": "openshift3/ose-haproxy-router:v3.5.0.39",
+ "ports": [
+ {
+ "containerPort": 80
+ },
+ {
+ "containerPort": 443
+ },
+ {
+ "name": "stats",
+ "containerPort": 1936,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DEFAULT_CERTIFICATE_DIR",
+ "value": "/etc/pki/tls/private"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_HOSTNAME"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_HTTP_VSERVER"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_INSECURE",
+ "value": "false"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_INTERNAL_ADDRESS"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_PARTITION_PATH"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_PASSWORD"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_PRIVKEY",
+ "value": "/etc/secret-volume/router.pem"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_USERNAME"
+ },
+ {
+ "name": "ROUTER_EXTERNAL_HOST_VXLAN_GW_CIDR"
+ },
+ {
+ "name": "ROUTER_SERVICE_HTTPS_PORT",
+ "value": "443"
+ },
+ {
+ "name": "ROUTER_SERVICE_HTTP_PORT",
+ "value": "80"
+ },
+ {
+ "name": "ROUTER_SERVICE_NAME",
+ "value": "router"
+ },
+ {
+ "name": "ROUTER_SERVICE_NAMESPACE",
+ "value": "default"
+ },
+ {
+ "name": "ROUTER_SUBDOMAIN"
+ },
+ {
+ "name": "STATS_PASSWORD",
+ "value": "eSfUICQyyr"
+ },
+ {
+ "name": "STATS_PORT",
+ "value": "1936"
+ },
+ {
+ "name": "STATS_USERNAME",
+ "value": "admin"
+ }
+ ],
+ "resources": {
+ "requests": {
+ "cpu": "100m",
+ "memory": "256Mi"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "server-certificate",
+ "readOnly": true,
+ "mountPath": "/etc/pki/tls/private"
+ }
+ ],
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/healthz",
+ "port": 1936,
+ "host": "localhost"
+ },
+ "initialDelaySeconds": 10
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/healthz",
+ "port": 1936,
+ "host": "localhost"
+ },
+ "initialDelaySeconds": 10
+ },
+ "imagePullPolicy": "IfNotPresent"
+ }
+ ],
+ "nodeSelector": {
+ "type": "infra"
+ },
+ "serviceAccountName": "router",
+ "serviceAccount": "router",
+ "hostNetwork": true,
+ "securityContext": {}
+ }
+ }
+ },
+ "status": {
+ "latestVersion": 0,
+ "observedGeneration": 0,
+ "replicas": 0,
+ "updatedReplicas": 0,
+ "availableReplicas": 0,
+ "unavailableReplicas": 0
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "creationTimestamp": null,
+ "labels": {
+ "router": "router"
+ },
+ "annotations": {
+ "service.alpha.openshift.io/serving-cert-secret-name": "router-certs"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "80-tcp",
+ "port": 80,
+ "targetPort": 80
+ },
+ {
+ "name": "443-tcp",
+ "port": 443,
+ "targetPort": 443
+ },
+ {
+ "name": "1936-tcp",
+ "protocol": "TCP",
+ "port": 1936,
+ "targetPort": 1936
+ }
+ ],
+ "selector": {
+ "router": "router"
+ }
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }
+ ]
+}'''
+
+ @mock.patch('oc_adm_router.Utils._write')
+ @mock.patch('oc_adm_router.Utils.create_tmpfile_copy')
+ @mock.patch('oc_adm_router.Router._run')
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ ''' Testing a create '''
+ params = {'state': 'present',
+ 'debug': False,
+ 'namespace': 'default',
+ 'name': 'router',
+ 'default_cert': None,
+ 'cert_file': None,
+ 'key_file': None,
+ 'cacert_file': None,
+ 'labels': None,
+ 'ports': ['80:80', '443:443'],
+ 'images': None,
+ 'latest_images': None,
+ 'clusterip': None,
+ 'portalip': None,
+ 'session_affinity': None,
+ 'service_type': None,
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'replicas': 2,
+ 'selector': 'type=infra',
+ 'service_account': 'router',
+ 'router_type': None,
+ 'host_network': None,
+ 'external_host': None,
+ 'external_host_vserver': None,
+ 'external_host_insecure': False,
+ 'external_host_partition_path': None,
+ 'external_host_username': None,
+ 'external_host_password': None,
+ 'external_host_private_key': None,
+ 'expose_metrics': False,
+ 'metrics_image': None,
+ 'stats_user': None,
+ 'stats_password': None,
+ 'stats_port': 1936,
+ 'edits': []}
+
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server (NotFound): deploymentconfigs "router" not found'),
+ (1, '', 'Error from server (NotFound): service "router" not found'),
+ (1, '', 'Error from server (NotFound): serviceaccount "router" not found'),
+ (1, '', 'Error from server (NotFound): secret "router-certs" not found'),
+            (1, '', 'Error from server (NotFound): clusterrolebinding "router-router-role" not found'),
+ (0, RouterTest.dry_run, ''),
+ (0, '', ''),
+ (0, '', ''),
+ (0, '', ''),
+ (0, '', ''),
+ (0, '', ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = Router.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ for result in results['results']['results']:
+ self.assertEqual(result['returncode'], 0)
+
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'svc', 'router', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'sa', 'router', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'secret', 'router-certs', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'clusterrolebinding', 'router-router-role', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'adm', 'router', 'router', '--expose-metrics=False', '--external-host-insecure=False',
+ '--ports=80:80,443:443', '--replicas=2', '--selector=type=infra', '--service-account=router',
+ '--stats-port=1936', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None)])
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
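
The py2 and py3 lookup tests above all drive the same search order. A minimal sketch of that logic, inferred from the mocks rather than taken from the module source (the extra lookup locations follow from the py2 cases):

    import os
    import shutil

    # Fallback locations beyond $PATH that the tests expect to be probed.
    EXTRA_PATHS = ['/usr/local/bin', os.path.expanduser('~/bin')]

    def locate_oc_binary():
        '''Return the full path to oc when found, else the bare name.'''
        paths = os.environ.get('PATH', '').split(os.pathsep) + EXTRA_PATHS
        try:
            # py3: shutil.which honors an explicit search path
            found = shutil.which('oc', path=os.pathsep.join(paths))
            if found is not None:
                return found
        except AttributeError:
            # py2 has no shutil.which; fall back to a naive existence check
            for path in paths:
                candidate = os.path.join(path, 'oc')
                if os.path.exists(candidate):
                    return candidate
        return 'oc'
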
diff --git a/roles/lib_openshift/src/test/unit/test_oc_env.py b/roles/lib_openshift/src/test/unit/test_oc_env.py
index dab5099c2..2f416c05e 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_env.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_env.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc_env
'''
-# To run:
-# ./oc_env.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCEnvTest(unittest.TestCase):
Test class for OCEnv
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_env.locate_oc_binary')
@mock.patch('oc_env.Utils.create_tmpfile_copy')
@mock.patch('oc_env.OCEnv._run')
@@ -147,7 +135,7 @@ class OCEnvTest(unittest.TestCase):
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'dc', 'router', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),
])
@mock.patch('oc_env.locate_oc_binary')
@@ -333,7 +321,7 @@ class OCEnvTest(unittest.TestCase):
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'dc', 'router', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),
])
@mock.patch('oc_env.locate_oc_binary')
@@ -448,7 +436,7 @@ class OCEnvTest(unittest.TestCase):
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'dc', 'router', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),
])
@unittest.skipIf(six.PY3, 'py2 test only')
@@ -558,11 +546,3 @@ class OCEnvTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_label.py b/roles/lib_openshift/src/test/unit/test_oc_label.py
index 933b5f221..5453266c1 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_label.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_label.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc label
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCLabelTest(unittest.TestCase):
Test class for OCLabel
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_label.Utils.create_tmpfile_copy')
@mock.patch('oc_label.OCLabel._run')
def test_state_list(self, mock_cmd, mock_tmpfile_copy):
@@ -295,11 +283,3 @@ class OCLabelTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
new file mode 100755
index 000000000..da326742f
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
@@ -0,0 +1,903 @@
+'''
+ Unit tests for oc_objectvalidator
+'''
+
+import os
+import sys
+import unittest
+import mock
+
+# Disable the invalid-name check for these tests so that
+# variable names can stay brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_objectvalidator import OCObjectValidator # noqa: E402
+
+
+class OCObjectValidatorTest(unittest.TestCase):
+ '''
+ Test class for OCObjectValidator
+ '''
+
+ maxDiff = None
+
+ @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_objectvalidator.OCObjectValidator._run')
+ def test_no_data(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing when all objects are empty '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ empty = '''{
+ "apiVersion": "v1",
+ "items": [],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (0, empty, ''),
+
+ # Second call to mock
+ (0, empty, ''),
+
+ # Third call to mock
+ (0, empty, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCObjectValidator.run_ansible(params)
+
+ # Assert
+ self.assertNotIn('failed', results)
+ self.assertEqual(results['msg'], 'All objects are valid.')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'netnamespace', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
+ ])
+
+ @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_objectvalidator.OCObjectValidator._run')
+ def test_error_code(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing when we fail to get objects '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (1, '', 'Error.'),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ error_results = {
+ 'returncode': 1,
+ 'stderr': 'Error.',
+ 'stdout': '',
+ 'cmd': 'oc get hostsubnet -o json -n default',
+ 'results': [{}]
+ }
+
+ # Act
+ results = OCObjectValidator.run_ansible(params)
+
+ # Assert
+ self.assertTrue(results['failed'])
+ self.assertEqual(results['msg'], 'Failed to GET hostsubnet.')
+ self.assertEqual(results['state'], 'list')
+ self.assertEqual(results['results'], error_results)
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
+ ])
+
+ @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_objectvalidator.OCObjectValidator._run')
+ def test_valid_both(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing when all objects are valid '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ valid_hostsubnet = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "host": "bar0",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:09Z",
+ "name": "bar0",
+ "namespace": "",
+ "resourceVersion": "986",
+ "selfLink": "/oapi/v1/hostsubnetsbar0",
+ "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar1",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:18Z",
+ "name": "bar1",
+ "namespace": "",
+ "resourceVersion": "988",
+ "selfLink": "/oapi/v1/hostsubnetsbar1",
+ "uid": "57710d84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar2",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:26Z",
+ "name": "bar2",
+ "namespace": "",
+ "resourceVersion": "991",
+ "selfLink": "/oapi/v1/hostsubnetsbar2",
+ "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+ }'''
+
+ valid_netnamespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:16Z",
+ "name": "foo0",
+ "namespace": "",
+ "resourceVersion": "959",
+ "selfLink": "/oapi/v1/netnamespacesfoo0",
+ "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo0"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:26Z",
+ "name": "foo1",
+ "namespace": "",
+ "resourceVersion": "962",
+ "selfLink": "/oapi/v1/netnamespacesfoo1",
+ "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo1"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:36Z",
+ "name": "foo2",
+ "namespace": "",
+ "resourceVersion": "965",
+ "selfLink": "/oapi/v1/netnamespacesfoo2",
+ "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo2"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+ }'''
+
+ valid_namespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c1,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000000000/10000",
+ "openshift.io/sa.scc.uid-range": "1000000000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:49Z",
+ "name": "default",
+ "namespace": "",
+ "resourceVersion": "165",
+ "selfLink": "/api/v1/namespacesdefault",
+ "uid": "23c0c6aa-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:49Z",
+ "name": "kube-system",
+ "namespace": "",
+ "resourceVersion": "533",
+ "selfLink": "/api/v1/namespaceskube-system",
+ "uid": "23c21758-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "developer",
+ "openshift.io/sa.scc.mcs": "s0:c9,c4",
+ "openshift.io/sa.scc.supplemental-groups": "1000080000/10000",
+ "openshift.io/sa.scc.uid-range": "1000080000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:17:16Z",
+ "name": "myproject",
+ "namespace": "",
+ "resourceVersion": "2898",
+ "selfLink": "/api/v1/namespacesmyproject",
+ "uid": "5ae3764d-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c6,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000030000/10000",
+ "openshift.io/sa.scc.uid-range": "1000030000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:51Z",
+ "name": "openshift",
+ "namespace": "",
+ "resourceVersion": "171",
+ "selfLink": "/api/v1/namespacesopenshift",
+ "uid": "24f7b34d-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c5,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000020000/10000",
+ "openshift.io/sa.scc.uid-range": "1000020000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:51Z",
+ "name": "openshift-infra",
+ "namespace": "",
+ "resourceVersion": "169",
+ "selfLink": "/api/v1/namespacesopenshift-infra",
+ "uid": "24a2ed75-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "developer1",
+ "openshift.io/sa.scc.mcs": "s0:c10,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000090000/10000",
+ "openshift.io/sa.scc.uid-range": "1000090000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:17:56Z",
+ "name": "yourproject",
+ "namespace": "",
+ "resourceVersion": "2955",
+ "selfLink": "/api/v1/namespacesyourproject",
+ "uid": "72df7fb9-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (0, valid_hostsubnet, ''),
+
+ # Second call to mock
+ (0, valid_netnamespace, ''),
+
+ # Third call to mock
+ (0, valid_namespace, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCObjectValidator.run_ansible(params)
+
+ # Assert
+ self.assertNotIn('failed', results)
+ self.assertEqual(results['msg'], 'All objects are valid.')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'netnamespace', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
+ ])
+
+ @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_objectvalidator.OCObjectValidator._run')
+ def test_invalid_both(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing when all objects are invalid '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ invalid_hostsubnet = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "host": "bar0",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:09Z",
+ "name": "bar0",
+ "namespace": "",
+ "resourceVersion": "986",
+ "selfLink": "/oapi/v1/hostsubnetsbar0",
+ "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar1",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:18Z",
+ "name": "bar1",
+ "namespace": "",
+ "resourceVersion": "988",
+ "selfLink": "/oapi/v1/hostsubnetsbar1",
+ "uid": "57710d84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar2",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:26Z",
+ "name": "bar2",
+ "namespace": "",
+ "resourceVersion": "991",
+ "selfLink": "/oapi/v1/hostsubnetsbar2",
+ "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "baz1",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:49Z",
+ "name": "baz0",
+ "namespace": "",
+ "resourceVersion": "996",
+ "selfLink": "/oapi/v1/hostsubnetsbaz0",
+ "uid": "69f75f87-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ invalid_netnamespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:52Z",
+ "name": "bar0",
+ "namespace": "",
+ "resourceVersion": "969",
+ "selfLink": "/oapi/v1/netnamespacesbar0",
+ "uid": "245d416e-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "bar1"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:16Z",
+ "name": "foo0",
+ "namespace": "",
+ "resourceVersion": "959",
+ "selfLink": "/oapi/v1/netnamespacesfoo0",
+ "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo0"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:26Z",
+ "name": "foo1",
+ "namespace": "",
+ "resourceVersion": "962",
+ "selfLink": "/oapi/v1/netnamespacesfoo1",
+ "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo1"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:36Z",
+ "name": "foo2",
+ "namespace": "",
+ "resourceVersion": "965",
+ "selfLink": "/oapi/v1/netnamespacesfoo2",
+ "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo2"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ invalid_namespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c1,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000000000/10000",
+ "openshift.io/sa.scc.uid-range": "1000000000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:49Z",
+ "name": "default",
+ "namespace": "",
+ "resourceVersion": "165",
+ "selfLink": "/api/v1/namespacesdefault",
+ "uid": "23c0c6aa-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/requester": "",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:49Z",
+ "name": "kube-system",
+ "namespace": "",
+ "resourceVersion": "3052",
+ "selfLink": "/api/v1/namespaceskube-system",
+ "uid": "23c21758-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "developer",
+ "openshift.io/sa.scc.mcs": "s0:c9,c4",
+ "openshift.io/sa.scc.supplemental-groups": "1000080000/10000",
+ "openshift.io/sa.scc.uid-range": "1000080000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:17:16Z",
+ "name": "myproject",
+ "namespace": "",
+ "resourceVersion": "2898",
+ "selfLink": "/api/v1/namespacesmyproject",
+ "uid": "5ae3764d-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/requester": "",
+ "openshift.io/sa.scc.mcs": "s0:c6,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000030000/10000",
+ "openshift.io/sa.scc.uid-range": "1000030000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:51Z",
+ "name": "openshift",
+ "namespace": "",
+ "resourceVersion": "3057",
+ "selfLink": "/api/v1/namespacesopenshift",
+ "uid": "24f7b34d-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "system:admin",
+ "openshift.io/sa.scc.mcs": "s0:c10,c5",
+ "openshift.io/sa.scc.supplemental-groups": "1000100000/10000",
+ "openshift.io/sa.scc.uid-range": "1000100000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:21:15Z",
+ "name": "openshift-fancy",
+ "namespace": "",
+ "resourceVersion": "3072",
+ "selfLink": "/api/v1/namespacesopenshift-fancy",
+ "uid": "e958063c-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c5,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000020000/10000",
+ "openshift.io/sa.scc.uid-range": "1000020000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:51Z",
+ "name": "openshift-infra",
+ "namespace": "",
+ "resourceVersion": "169",
+ "selfLink": "/api/v1/namespacesopenshift-infra",
+ "uid": "24a2ed75-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "developer1",
+ "openshift.io/sa.scc.mcs": "s0:c10,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000090000/10000",
+ "openshift.io/sa.scc.uid-range": "1000090000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:17:56Z",
+ "name": "yourproject",
+ "namespace": "",
+ "resourceVersion": "2955",
+ "selfLink": "/api/v1/namespacesyourproject",
+ "uid": "72df7fb9-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ invalid_results = {
+ 'hostsubnets where metadata.name != host': [{
+ 'apiVersion': 'v1',
+ 'host': 'baz1',
+ 'hostIP': '1.1.1.1',
+ 'kind': 'HostSubnet',
+ 'metadata': {
+ 'creationTimestamp': '2017-02-16T18:47:49Z',
+ 'name': 'baz0',
+ 'namespace': '',
+ 'resourceVersion': '996',
+ 'selfLink': '/oapi/v1/hostsubnetsbaz0',
+ 'uid': '69f75f87-f478-11e6-aae0-507b9dac97ff'
+ },
+ 'subnet': '1.1.0.0/24'
+ }],
+ 'netnamespaces where metadata.name != netname': [{
+ 'apiVersion': 'v1',
+ 'kind': 'NetNamespace',
+ 'metadata': {
+ 'creationTimestamp': '2017-02-16T18:45:52Z',
+ 'name': 'bar0',
+ 'namespace': '',
+ 'resourceVersion': '969',
+ 'selfLink': '/oapi/v1/netnamespacesbar0',
+ 'uid': '245d416e-f478-11e6-aae0-507b9dac97ff'
+ },
+ 'netid': 100,
+ 'netname': 'bar1'
+ }],
+ 'namespaces that use reserved names and were not created by infrastructure components': [{
+ 'apiVersion': 'v1',
+ 'kind': 'Namespace',
+ 'metadata': {'annotations': {'openshift.io/requester': '',
+ 'openshift.io/sa.scc.mcs': 's0:c3,c2',
+ 'openshift.io/sa.scc.supplemental-groups': '1000010000/10000',
+ 'openshift.io/sa.scc.uid-range': '1000010000/10000'},
+ 'creationTimestamp': '2017-03-02T00:49:49Z',
+ 'name': 'kube-system',
+ 'namespace': '',
+ 'resourceVersion': '3052',
+ 'selfLink': '/api/v1/namespaceskube-system',
+ 'uid': '23c21758-fee2-11e6-b45a-507b9dac97ff'},
+ 'spec': {'finalizers': ['kubernetes', 'openshift.io/origin']},
+ 'status': {'phase': 'Active'}},
+ {'apiVersion': 'v1',
+ 'kind': 'Namespace',
+ 'metadata': {'annotations': {'openshift.io/requester': '',
+ 'openshift.io/sa.scc.mcs': 's0:c6,c0',
+ 'openshift.io/sa.scc.supplemental-groups': '1000030000/10000',
+ 'openshift.io/sa.scc.uid-range': '1000030000/10000'},
+ 'creationTimestamp': '2017-03-02T00:49:51Z',
+ 'name': 'openshift',
+ 'namespace': '',
+ 'resourceVersion': '3057',
+ 'selfLink': '/api/v1/namespacesopenshift',
+ 'uid': '24f7b34d-fee2-11e6-b45a-507b9dac97ff'},
+ 'spec': {'finalizers': ['kubernetes', 'openshift.io/origin']},
+ 'status': {'phase': 'Active'}},
+ {'apiVersion': 'v1',
+ 'kind': 'Namespace',
+ 'metadata': {'annotations': {'openshift.io/description': '',
+ 'openshift.io/display-name': '',
+ 'openshift.io/requester': 'system:admin',
+ 'openshift.io/sa.scc.mcs': 's0:c10,c5',
+ 'openshift.io/sa.scc.supplemental-groups': '1000100000/10000',
+ 'openshift.io/sa.scc.uid-range': '1000100000/10000'},
+ 'creationTimestamp': '2017-03-02T02:21:15Z',
+ 'name': 'openshift-fancy',
+ 'namespace': '',
+ 'resourceVersion': '3072',
+ 'selfLink': '/api/v1/namespacesopenshift-fancy',
+ 'uid': 'e958063c-feee-11e6-b45a-507b9dac97ff'},
+ 'spec': {'finalizers': ['openshift.io/origin', 'kubernetes']},
+ 'status': {'phase': 'Active'}
+ }],
+ }
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (0, invalid_hostsubnet, ''),
+
+ # Second call to mock
+ (0, invalid_netnamespace, ''),
+
+ # Third call to mock
+ (0, invalid_namespace, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCObjectValidator.run_ansible(params)
+
+ # Assert
+ self.assertTrue(results['failed'])
+ self.assertIn('All objects are not valid.', results['msg'])
+ self.assertEqual(results['state'], 'list')
+ self.assertEqual(results['results'], invalid_results)
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'netnamespace', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
+ ])
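
Pulled out of those fixtures, the three invariants are: a hostsubnet's metadata.name must equal its host, a netnamespace's metadata.name must equal its netname, and a reserved-name namespace must not carry an ordinary requester annotation. A hedged sketch of the checks, reconstructed from the expected results rather than from oc_objectvalidator itself:

    def find_invalid_objects(hostsubnets, netnamespaces, namespaces):
        '''Collect objects breaking the invariants the fixtures encode.'''
        bad_hostsubnets = [i for i in hostsubnets['items']
                           if i['metadata']['name'] != i['host']]
        bad_netnamespaces = [i for i in netnamespaces['items']
                             if i['metadata']['name'] != i['netname']]
        # Infrastructure-created reserved namespaces carry no
        # 'openshift.io/requester' annotation; any requester value on one
        # (even '' or 'system:admin') marks it invalid in the fixtures.
        bad_namespaces = [
            i for i in namespaces['items']
            if i['metadata']['name'].startswith(('default', 'openshift', 'kube'))
            and 'openshift.io/requester' in i['metadata'].get('annotations', {})
        ]
        return bad_hostsubnets, bad_netnamespaces, bad_namespaces
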
diff --git a/roles/lib_openshift/src/test/unit/test_oc_process.py b/roles/lib_openshift/src/test/unit/test_oc_process.py
index c4b36928b..d887f7636 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_process.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_process.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc process
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -254,10 +246,6 @@ class OCProcessTest(unittest.TestCase):
}
}'''
- def setUp(self):
- ''' setup method will set to known configuration '''
- pass
-
@mock.patch('oc_process.Utils.create_tmpfile_copy')
@mock.patch('oc_process.OCProcess._run')
def test_state_list(self, mock_cmd, mock_tmpfile_copy):
@@ -582,11 +570,3 @@ class OCProcessTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_project.py b/roles/lib_openshift/src/test/unit/test_oc_project.py
new file mode 100755
index 000000000..8e1a76323
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_project.py
@@ -0,0 +1,280 @@
+'''
+ Unit tests for oc project
+'''
+
+import copy
+import os
+import sys
+import unittest
+import mock
+
+# Disable the invalid-name check for these tests so that
+# variable names can stay brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_project import OCProject # noqa: E402
+
+
+class OCProjectTest(unittest.TestCase):
+ '''
+ Test class for OCProject
+ '''
+
+ # run_ansible input parameters
+ params = {
+ 'state': 'present',
+ 'display_name': 'operations project',
+ 'name': 'operations',
+ 'node_selector': ['ops_only=True'],
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False,
+ 'admin': None,
+ 'admin_role': 'admin',
+ 'description': 'All things operations project',
+ }
+
+ @mock.patch('oc_project.locate_oc_binary')
+ @mock.patch('oc_project.Utils.create_tmpfile_copy')
+ @mock.patch('oc_project.Utils._write')
+ @mock.patch('oc_project.OCProject._run')
+ def test_adding_a_project(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_oc_bin):
+ ''' Testing adding a project '''
+
+ params = copy.deepcopy(OCProjectTest.params)
+
+ # run_ansible input parameters
+ project_results = '''{
+ "kind": "Project",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "operations",
+ "selfLink": "/oapi/v1/projects/operations",
+ "uid": "5e52afb8-ee33-11e6-89f4-0edc441d9666",
+ "resourceVersion": "1584",
+ "labels": {},
+ "annotations": {
+ "openshift.io/node-selector": "ops_only=True",
+ "openshift.io/sa.initialized-roles": "true",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ }
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: namespaces "operations" not found'),
+ (1, '', 'Error from server: namespaces "operations" not found'),
+ (0, '', ''), # created
+ (0, project_results, ''), # fetch it
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ mock_loc_oc_bin.side_effect = [
+ 'oc',
+ ]
+
+ # Act
+ results = OCProject.run_ansible(params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['results']['results']['metadata']['name'], 'operations')
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'adm', 'new-project', 'operations', mock.ANY,
+ mock.ANY, mock.ANY, mock.ANY], None),
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+
+ ])
+
+ @mock.patch('oc_project.locate_oc_binary')
+ @mock.patch('oc_project.Utils.create_tmpfile_copy')
+ @mock.patch('oc_project.Utils._write')
+ @mock.patch('oc_project.OCProject._run')
+ def test_modifying_a_project_no_attributes(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_oc_bin):
+ ''' Testing modifying a project with no attribute changes '''
+ params = copy.deepcopy(self.params)
+ params['display_name'] = None
+ params['node_selector'] = None
+ params['description'] = None
+
+ # run_ansible input parameters
+ project_results = '''{
+ "kind": "Project",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "operations",
+ "selfLink": "/oapi/v1/projects/operations",
+ "uid": "5e52afb8-ee33-11e6-89f4-0edc441d9666",
+ "resourceVersion": "1584",
+ "labels": {},
+ "annotations": {
+ "openshift.io/node-selector": "",
+ "openshift.io/description": "This is a description",
+ "openshift.io/sa.initialized-roles": "true",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ }
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ (0, project_results, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ mock_loc_oc_bin.side_effect = [
+ 'oc',
+ ]
+
+ # Act
+ results = OCProject.run_ansible(params, False)
+
+ # Assert
+ self.assertFalse(results['changed'])
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ ])
+
+ @mock.patch('oc_project.locate_oc_binary')
+ @mock.patch('oc_project.Utils.create_tmpfile_copy')
+ @mock.patch('oc_project.Utils._write')
+ @mock.patch('oc_project.OCProject._run')
+ def test_modifying_project_attributes(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_oc_bin):
+ ''' Testing modifying project attributes '''
+ params = copy.deepcopy(self.params)
+ params['display_name'] = 'updated display name'
+ params['node_selector'] = 'type=infra'
+ params['description'] = 'updated description'
+
+ # run_ansible input parameters
+ project_results = '''{
+ "kind": "Project",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "operations",
+ "selfLink": "/oapi/v1/projects/operations",
+ "uid": "5e52afb8-ee33-11e6-89f4-0edc441d9666",
+ "resourceVersion": "1584",
+ "labels": {},
+ "annotations": {
+ "openshift.io/node-selector": "",
+ "openshift.io/description": "This is a description",
+ "openshift.io/sa.initialized-roles": "true",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ }
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }'''
+
+ mod_project_results = '''{
+ "kind": "Project",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "operations",
+ "selfLink": "/oapi/v1/projects/operations",
+ "uid": "5e52afb8-ee33-11e6-89f4-0edc441d9666",
+ "resourceVersion": "1584",
+ "labels": {},
+ "annotations": {
+ "openshift.io/node-selector": "type=infra",
+ "openshift.io/description": "updated description",
+ "openshift.io/display-name": "updated display name",
+ "openshift.io/sa.initialized-roles": "true",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ }
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ (0, project_results, ''),
+ (0, project_results, ''),
+ (0, '', ''),
+ (0, mod_project_results, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ mock_loc_oc_bin.side_effect = [
+ 'oc',
+ ]
+
+ # Act
+ results = OCProject.run_ansible(params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['results']['results']['metadata']['annotations']['openshift.io/description'], 'updated description')
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'replace', '-f', mock.ANY], None),
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ ])
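
The call sequence asserted above traces the usual ensure loop: fetch, compare, replace only on drift, then fetch again to report the final state. A rough sketch of that flow, assuming a hypothetical client wrapper (the names are illustrative, not the module's internals):

    def ensure_project(client, name, desired_annotations):
        '''Idempotent ensure: GET, compare, REPLACE only on drift, GET again.'''
        current = client.get('namespace', name)
        drift = any(
            current['metadata'].get('annotations', {}).get(key) != value
            for key, value in desired_annotations.items())
        if not drift:
            return {'changed': False, 'results': current}
        client.replace('namespace', name)  # runs 'oc replace -f <tmpfile>'
        return {'changed': True, 'results': client.get('namespace', name)}
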
diff --git a/roles/lib_openshift/src/test/unit/test_oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py
index ea94bfabd..09c52a461 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_route.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_route.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc route
'''
-# To run:
-# ./oc_serviceaccount.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCRouteTest(unittest.TestCase):
Test class for OCServiceAccount
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_route.locate_oc_binary')
@mock.patch('oc_route.Utils.create_tmpfile_copy')
@mock.patch('oc_route.OCRoute._run')
@@ -135,7 +123,7 @@ class OCRouteTest(unittest.TestCase):
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),
])
@mock.patch('oc_route.locate_oc_binary')
@@ -265,9 +253,9 @@ metadata:
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'create', '-f', mock.ANY], None),
- mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),
])
@unittest.skipIf(six.PY3, 'py2 test only')
@@ -377,11 +365,3 @@ metadata:
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_scale.py b/roles/lib_openshift/src/test/unit/test_oc_scale.py
index b2dec2fbe..d810735f2 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_scale.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_scale.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc scale
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCScaleTest(unittest.TestCase):
Test class for OCVersion
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_state_list(self, mock_openshift_cmd, mock_tmpfile_copy):
@@ -266,11 +254,3 @@ class OCScaleTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py
index 087c62dcf..e31393793 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc secret
'''
-# To run:
-# ./oc_secret.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCSecretTest(unittest.TestCase):
Test class for OCSecret
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_secret.locate_oc_binary')
@mock.patch('oc_secret.Utils.create_tmpfile_copy')
@mock.patch('oc_secret.Utils._write')
@@ -85,8 +73,8 @@ class OCSecretTest(unittest.TestCase):
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'secrets', 'testsecretname', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'secrets', 'new', 'testsecretname', mock.ANY], None),
+ mock.call(['oc', 'get', 'secrets', 'testsecretname', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'secrets', 'new', 'testsecretname', mock.ANY, '-n', 'default'], None),
])
mock_write.assert_has_calls([
@@ -200,11 +188,3 @@ class OCSecretTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_service.py b/roles/lib_openshift/src/test/unit/test_oc_service.py
index 8974eb6c6..e74c66665 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_service.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_service.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc service
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -33,10 +25,6 @@ class OCServiceTest(unittest.TestCase):
Test class for OCService
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_service.Utils.create_tmpfile_copy')
@mock.patch('oc_service.OCService._run')
def test_state_list(self, mock_cmd, mock_tmpfile_copy):
@@ -315,11 +303,3 @@ class OCServiceTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py
index b02b37053..5772d2f00 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc serviceaccount
'''
-# To run:
-# ./oc_serviceaccount.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCServiceAccountTest(unittest.TestCase):
Test class for OCServiceAccount
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_serviceaccount.locate_oc_binary')
@mock.patch('oc_serviceaccount.Utils.create_tmpfile_copy')
@mock.patch('oc_serviceaccount.OCServiceAccount._run')
@@ -111,9 +99,9 @@ class OCServiceAccountTest(unittest.TestCase):
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'testserviceaccountname', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'create', '-f', mock.ANY], None),
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'testserviceaccountname', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'sa', 'testserviceaccountname', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'get', 'sa', 'testserviceaccountname', '-o', 'json', '-n', 'default'], None),
])
@unittest.skipIf(six.PY3, 'py2 test only')
@@ -223,11 +211,3 @@ class OCServiceAccountTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py
index ab8ccd18c..b22525068 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc secret add
'''
-# To run:
-# ./oc_serviceaccount_secret.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -38,10 +30,6 @@ class OCServiceAccountSecretTest(unittest.TestCase):
Test class for OCServiceAccountSecret
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_serviceaccount_secret.locate_oc_binary')
@mock.patch('oc_serviceaccount_secret.Utils.create_tmpfile_copy')
@mock.patch('oc_serviceaccount_secret.Yedit._write')
@@ -181,10 +169,10 @@ secrets:
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'replace', '-f', mock.ANY], None),
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None)
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'replace', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None)
])
yaml_file = builder_pyyaml_file
@@ -304,9 +292,9 @@ secrets:
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'replace', '-f', mock.ANY], None),
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'replace', '-f', mock.ANY, '-n', 'default'], None),
])
yaml_file = builder_pyyaml_file
@@ -424,11 +412,3 @@ secrets:
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_version.py b/roles/lib_openshift/src/test/unit/test_oc_version.py
index 6daf5b00d..c287bad0b 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_version.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_version.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc version
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCVersionTest(unittest.TestCase):
Test class for OCVersion
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_version.Utils.create_tmpfile_copy')
@mock.patch('oc_version.OCVersion.openshift_cmd')
def test_get(self, mock_openshift_cmd, mock_tmpfile_copy):
@@ -172,11 +160,3 @@ class OCVersionTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/tasks/main.yml b/roles/lib_openshift/tasks/main.yml
index b8af7c7c9..ed97d539c 100644
--- a/roles/lib_openshift/tasks/main.yml
+++ b/roles/lib_openshift/tasks/main.yml
@@ -1,11 +1 @@
---
-- name: lib_openshift detect ostree
- stat:
- path: /run/ostree-booted
- register: ostree_booted
-
-- name: lib_openshift ensure python-ruamel-yaml package is on target
- package:
- name: python2-ruamel-yaml
- state: present
- when: not ostree_booted.stat.exists
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index b1d9d6869..a2ae6b4f6 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -311,7 +311,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -320,7 +321,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -334,6 +335,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # We did not add/update an existing list, nor add/update a key in a dict,
+ # so we must have been given syntax like a.b.c[<int>] = "data" for a
+ # non-existent array.
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
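
The net effect of this change is that put() now fails loudly when an intermediate node in the key path has an unexpected type, where it previously returned None and the caller lost the error. A small usage sketch, mirroring the new unit tests added to test_yedit.py further below (it assumes the library directory is importable):

    from yedit import Yedit, YeditException

    yed = Yedit(content={'a': {'b': {'c': ['d', 'e']}}})
    try:
        yed.put('a.b.c.d', 'x')  # 'c' is a list, so the dict step '.d' cannot apply
    except YeditException as err:
        print('rejected: {}'.format(err))
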
diff --git a/roles/lib_utils/meta/main.yml b/roles/lib_utils/meta/main.yml
index e06b9a0f1..cc18c453c 100644
--- a/roles/lib_utils/meta/main.yml
+++ b/roles/lib_utils/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- { role: openshift_repos }
+dependencies: []
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index 74ee52fe3..533665db2 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -125,7 +125,8 @@ class Yedit(object):
continue
elif data and not isinstance(data, dict):
- return None
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
@@ -134,7 +135,7 @@ class Yedit(object):
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
- return None
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
@@ -148,6 +149,12 @@ class Yedit(object):
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
+ # We did not add/update an existing list, nor add/update a key in a dict,
+ # so we must have been given syntax like a.b.c[<int>] = "data" for a
+ # non-existent array.
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
return data
@staticmethod
diff --git a/roles/lib_utils/src/test/unit/test_repoquery.py b/roles/lib_utils/src/test/unit/test_repoquery.py
index c487ab254..e39d9d83f 100755
--- a/roles/lib_utils/src/test/unit/test_repoquery.py
+++ b/roles/lib_utils/src/test/unit/test_repoquery.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for repoquery
'''
-# To run:
-# ./repoquery.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import sys
@@ -31,10 +23,6 @@ class RepoQueryTest(unittest.TestCase):
Test class for RepoQuery
'''
- def setUp(self):
- ''' setup method for other tests '''
- pass
-
@mock.patch('repoquery._run')
def test_querying_a_package(self, mock_cmd):
''' Testing querying a package '''
@@ -77,11 +65,3 @@ class RepoQueryTest(unittest.TestCase):
mock_cmd.assert_has_calls([
mock.call(['/usr/bin/repoquery', '--plugins', '--quiet', '--pkgnarrow=repos', '--queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}', 'bash']),
])
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_utils/src/test/unit/test_yedit.py b/roles/lib_utils/src/test/unit/test_yedit.py
index ed07ac96e..23a3f7353 100755
--- a/roles/lib_utils/src/test/unit/test_yedit.py
+++ b/roles/lib_utils/src/test/unit/test_yedit.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for yedit
'''
-# To run
-# python -m unittest yedit_test
-#
-# .............................
-# ----------------------------------------------------------------------
-# Ran 29 tests in 0.133s
-# OK
import os
import sys
@@ -23,7 +15,7 @@ import unittest
yedit_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, yedit_path)
-from yedit import Yedit # noqa: E402
+from yedit import Yedit, YeditException # noqa: E402
# pylint: disable=too-many-public-methods
# Silly pylint, moar tests!
@@ -200,7 +192,6 @@ class YeditTest(unittest.TestCase):
yed.append('x:y:z', [5, 6])
yed.append('x:y:z', [5, 6])
self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]])
- self.assertTrue(2 == yed.get('x:y:z').count([5, 6]))
self.assertFalse(yed.exists('x:y:z', 4))
def test_add_item_to_dict(self):
@@ -268,10 +259,24 @@ class YeditTest(unittest.TestCase):
yed.pop('a#b', 'c')
self.assertTrue({'a': {'b': {'d': 2}}} == yed.yaml_dict)
+ def test_accessing_path_with_unexpected_objects(self):
+ '''test providing source path objects that differ from current object state'''
+ yed = Yedit(content={'a': {'b': {'c': ['d', 'e']}}})
+ with self.assertRaises(YeditException):
+ yed.put('a.b.c.d', 'x')
+
+ def test_creating_new_objects_with_embedded_list(self):
+ '''test creating new objects with an embedded list in the creation path'''
+ yed = Yedit(content={'a': {'b': 12}})
+ with self.assertRaises(YeditException):
+ yed.put('new.stuff[0].here', 'value')
+
+ def test_creating_new_objects_with_trailing_list(self):
+ '''test creating new object(s) where the final piece is a list'''
+ yed = Yedit(content={'a': {'b': 12}})
+ with self.assertRaises(YeditException):
+ yed.put('new.stuff.here[0]', 'item')
+
def tearDown(self):
'''TearDown method'''
os.unlink(YeditTest.filename)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_utils/tasks/main.yml b/roles/lib_utils/tasks/main.yml
index 32ab9e0c6..ed97d539c 100644
--- a/roles/lib_utils/tasks/main.yml
+++ b/roles/lib_utils/tasks/main.yml
@@ -1,11 +1 @@
---
-- name: lib_utils detect ostree
- stat:
- path: /run/ostree-booted
- register: ostree_booted
-
-- name: lib_utils ensure python-ruamel-yaml package is on target
- package:
- name: python-ruamel-yaml
- state: present
- when: not ostree_booted.stat.exists
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index d211d30e8..fefd28bbd 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -22,6 +22,15 @@
- nuage.key
- nuage.kubeconfig
+- name: Copy the certificates and keys
+ become: yes
+ copy: src="/tmp/{{ item }}" dest="{{ cert_output_dir }}/{{ item }}"
+ with_items:
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
+
- include: certificates.yml
- name: Create nuage-openshift-monitor.yaml
diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml
index 16ea08244..eee448e2c 100644
--- a/roles/nuage_master/tasks/serviceaccount.yml
+++ b/roles/nuage_master/tasks/serviceaccount.yml
@@ -3,14 +3,20 @@
command: mktemp -u /tmp/openshift-ansible-XXXXXXX.kubeconfig
register: nuage_tmp_conf_mktemp
changed_when: False
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- set_fact:
nuage_tmp_conf: "{{ nuage_tmp_conf_mktemp.stdout }}"
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- name: Copy Configuration to temporary conf
command: >
cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{nuage_tmp_conf}}
changed_when: false
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- name: Create Admin Service Account
oc_serviceaccount:
@@ -18,6 +24,8 @@
name: nuage
namespace: default
state: present
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- name: Configure role/user permissions
command: >
@@ -27,6 +35,8 @@
register: osnuage_perm_task
failed_when: "'the object has been modified' not in osnuage_perm_task.stderr and osnuage_perm_task.rc != 0"
changed_when: osnuage_perm_task.rc == 0
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- name: Generate the node client config
command: >
@@ -40,8 +50,12 @@
--signer-serial={{ openshift_master_ca_serial }}
--basename='nuage'
--user={{ nuage_service_account }}
+ delegate_to: "{{ nuage_ca_master }}"
+ run_once: True
- name: Clean temporary configuration file
command: >
rm -f {{nuage_tmp_conf}}
changed_when: false
+ delegate_to: "{{ nuage_ca_master }}"
+ run_once: True
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index ae3ad31c3..70c2a9121 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -60,6 +60,7 @@
copy:
src: "{{ item.src }}"
dest: "{{ openshift_ca_config_dir }}/{{ item.dest }}"
+ force: no
with_items:
- src: "{{ (openshift_master_ca_certificate | default({'certfile':none})).certfile }}"
dest: ca.crt
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index b093d84fe..c204b5341 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -8,15 +8,13 @@ import datetime
import io
import os
import subprocess
-import sys
-import tempfile
+import yaml
-# File pointers from io.open require unicode inputs when using their
-# `write` method
-import six
-from six.moves import configparser
+# pylint import-error disabled because pylint cannot find the package
+# when installed in a virtualenv
+from ansible.module_utils.six.moves import configparser # pylint: disable=import-error
+from ansible.module_utils.basic import AnsibleModule
-import yaml
try:
# You can comment this import out and include a 'pass' in this
# block if you're manually testing this module on a NON-ATOMIC
@@ -24,13 +22,14 @@ try:
# available). That will force the `load_and_handle_cert` function
# to use the Fake OpenSSL classes.
import OpenSSL.crypto
+ HAS_OPENSSL = True
except ImportError:
# Some platforms (such as RHEL Atomic) may not have the Python
# OpenSSL library installed. In this case we will use a manual
# work-around to parse each certificate.
#
# Check for 'OpenSSL.crypto' in `sys.modules` later.
- pass
+ HAS_OPENSSL = False
DOCUMENTATION = '''
---
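
The try/except above now records the import result in a module-level flag rather
than probing sys.modules later. A minimal sketch of the pattern, where
FakeOpenSSLCertificate is this module's text-based stand-in and decode_cert_text
is the CLI fallback sketched further below:

    try:
        import OpenSSL.crypto
        HAS_OPENSSL = True
    except ImportError:
        HAS_OPENSSL = False

    def load_cert(pem_text):
        # Prefer the native parser when pyOpenSSL is importable.
        if HAS_OPENSSL:
            return OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM, pem_text)
        # Otherwise fall back to text parsing of the openssl CLI output.
        return FakeOpenSSLCertificate(decode_cert_text(pem_text))
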
@@ -158,6 +157,10 @@ might return: [('O=system', 'nodes'), ('CN=system', 'node:m01.example.com')]
'subjectAltName'"""
return self.extensions[i]
+ def get_extension_count(self):
+ """ get_extension_count """
+ return len(self.extensions)
+
def get_notAfter(self):
"""Returns a date stamp as a string in the form
'20180922170439Z'. strptime the result with format param:
@@ -268,30 +271,23 @@ A tuple of the form:
# around a missing library on the target host.
#
# pylint: disable=redefined-variable-type
- if 'OpenSSL.crypto' in sys.modules:
+ if HAS_OPENSSL:
# No work-around required
cert_loaded = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, _cert_string)
else:
- # Missing library, work-around required. We need to write the
- # cert out to disk temporarily so we can run the 'openssl'
+ # Missing library, work-around required. Run the 'openssl'
# command on it to decode it
- _, path = tempfile.mkstemp()
- with io.open(path, 'w') as fp:
- fp.write(six.u(_cert_string))
- fp.flush()
-
- cmd = 'openssl x509 -in {} -text'.format(path)
+ cmd = 'openssl x509 -text'
try:
- openssl_decoded = subprocess.Popen(cmd.split(),
- stdout=subprocess.PIPE)
+ openssl_proc = subprocess.Popen(cmd.split(),
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE)
except OSError:
ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.")
else:
- openssl_decoded = openssl_decoded.communicate()[0]
+ openssl_decoded = openssl_proc.communicate(_cert_string.encode('utf-8'))[0].decode('utf-8')
cert_loaded = FakeOpenSSLCertificate(openssl_decoded)
- finally:
- os.remove(path)
######################################################################
# Read all possible names from the cert
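
The temp-file dance is gone: the PEM text is piped to openssl over stdin and the
decoded text read back from stdout. As a standalone sketch, assuming the openssl
CLI is on PATH:

    import subprocess

    def decode_cert_text(pem_text):
        # Feed the PEM to 'openssl x509 -text' on stdin; no temp file needed.
        proc = subprocess.Popen(['openssl', 'x509', '-text'],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        out, _ = proc.communicate(pem_text.encode('utf-8'))
        return out.decode('utf-8')
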
@@ -301,34 +297,12 @@ A tuple of the form:
# To read SANs from a cert we must read the subjectAltName
# extension from the X509 Object. What makes this more difficult
- # is that pyOpenSSL does not give extensions as a list, nor does
- # it provide a count of all loaded extensions.
- #
- # Rather, extensions are REQUESTED by index. We must iterate over
- # all extensions until we find the one called 'subjectAltName'. If
- # we don't find that extension we'll eventually request an
- # extension at an index where no extension exists (IndexError is
- # raised). When that happens we know that the cert has no SANs so
- # we break out of the loop.
- i = 0
- checked_all_extensions = False
- while not checked_all_extensions:
- try:
- # Read the extension at index 'i'
- ext = cert_loaded.get_extension(i)
- except IndexError:
- # We tried to read an extension but it isn't there, that
- # means we ran out of extensions to check. Abort
- san = None
- checked_all_extensions = True
- else:
- # We were able to load the extension at index 'i'
- if ext.get_short_name() == 'subjectAltName':
- san = ext
- checked_all_extensions = True
- else:
- # Try reading the next extension
- i += 1
+    # is that pyOpenSSL does not expose extensions as an iterable, only by index
+ san = None
+ for i in range(cert_loaded.get_extension_count()):
+ ext = cert_loaded.get_extension(i)
+ if ext.get_short_name() == 'subjectAltName':
+ san = ext
if san is not None:
# The X509Extension object for subjectAltName prints as a
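
The bounded for loop replaces the old try/except-IndexError scan. One caveat the
test suite below handles explicitly: pyOpenSSL returns extension short names as
bytes on Python 3, while FakeOpenSSLCertificate returns text. A sketch that
covers both:

    def find_san(cert):
        # Scan extensions by index until subjectAltName turns up.
        for i in range(cert.get_extension_count()):
            ext = cert.get_extension(i)
            if ext.get_short_name() in ('subjectAltName', b'subjectAltName'):
                return ext
        return None
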
@@ -341,9 +315,13 @@ A tuple of the form:
######################################################################
# Grab the expiration date
+ not_after = cert_loaded.get_notAfter()
+ # example get_notAfter() => 20180922170439Z
+ if isinstance(not_after, bytes):
+ not_after = not_after.decode('utf-8')
+
cert_expiry_date = datetime.datetime.strptime(
- cert_loaded.get_notAfter(),
- # example get_notAfter() => 20180922170439Z
+ not_after,
'%Y%m%d%H%M%SZ')
time_remaining = cert_expiry_date - now
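
A worked example of the parse, including the bytes guard added above:

    import datetime

    not_after = b'20180922170439Z'   # pyOpenSSL returns bytes on Python 3
    if isinstance(not_after, bytes):
        not_after = not_after.decode('utf-8')
    expiry = datetime.datetime.strptime(not_after, '%Y%m%d%H%M%SZ')
    assert expiry == datetime.datetime(2018, 9, 22, 17, 4, 39)
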
@@ -455,13 +433,11 @@ an OpenShift Container Platform cluster
)
# Basic scaffolding for OpenShift specific certs
- openshift_base_config_path = module.params['config_base']
- openshift_master_config_path = os.path.normpath(
- os.path.join(openshift_base_config_path, "master/master-config.yaml")
- )
- openshift_node_config_path = os.path.normpath(
- os.path.join(openshift_base_config_path, "node/node-config.yaml")
- )
+ openshift_base_config_path = os.path.realpath(module.params['config_base'])
+ openshift_master_config_path = os.path.join(openshift_base_config_path,
+ "master", "master-config.yaml")
+ openshift_node_config_path = os.path.join(openshift_base_config_path,
+ "node", "node-config.yaml")
openshift_cert_check_paths = [
openshift_master_config_path,
openshift_node_config_path,
@@ -476,9 +452,7 @@ an OpenShift Container Platform cluster
kubeconfig_paths = []
for m_kube_config in master_kube_configs:
kubeconfig_paths.append(
- os.path.normpath(
- os.path.join(openshift_base_config_path, "master/%s.kubeconfig" % m_kube_config)
- )
+ os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig")
)
# Validate some paths we have the ability to do ahead of time
@@ -527,7 +501,7 @@ an OpenShift Container Platform cluster
######################################################################
for os_cert in filter_paths(openshift_cert_check_paths):
# Open up that config file and locate the cert and CA
- with open(os_cert, 'r') as fp:
+ with io.open(os_cert, 'r', encoding='utf-8') as fp:
cert_meta = {}
cfg = yaml.load(fp)
# cert files are specified in parsed `fp` as relative to the path
@@ -542,7 +516,7 @@ an OpenShift Container Platform cluster
# Load the certificate and the CA, parse their expiration dates into
# datetime objects so we can manipulate them later
for _, v in cert_meta.items():
- with open(v, 'r') as fp:
+ with io.open(v, 'r', encoding='utf-8') as fp:
cert = fp.read()
(cert_subject,
cert_expiry_date,
@@ -575,7 +549,7 @@ an OpenShift Container Platform cluster
try:
# Try to read the standard 'node-config.yaml' file to check if
# this host is a node.
- with open(openshift_node_config_path, 'r') as fp:
+ with io.open(openshift_node_config_path, 'r', encoding='utf-8') as fp:
cfg = yaml.load(fp)
# OK, the config file exists, therefore this is a
@@ -588,7 +562,7 @@ an OpenShift Container Platform cluster
cfg_path = os.path.dirname(fp.name)
node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig)
- with open(node_kubeconfig, 'r') as fp:
+ with io.open(node_kubeconfig, 'r', encoding='utf8') as fp:
# Read in the nodes kubeconfig file and grab the good stuff
cfg = yaml.load(fp)
@@ -613,7 +587,7 @@ an OpenShift Container Platform cluster
pass
for kube in filter_paths(kubeconfig_paths):
- with open(kube, 'r') as fp:
+ with io.open(kube, 'r', encoding='utf-8') as fp:
# TODO: Maybe consider catching exceptions here?
cfg = yaml.load(fp)
@@ -656,7 +630,7 @@ an OpenShift Container Platform cluster
etcd_certs = []
etcd_cert_params.append('dne')
try:
- with open('/etc/etcd/etcd.conf', 'r') as fp:
+ with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp:
etcd_config = configparser.ConfigParser()
# Reason: This check is disabled because the issue was introduced
# during a period where the pylint checks weren't enabled for this file
@@ -675,7 +649,7 @@ an OpenShift Container Platform cluster
pass
for etcd_cert in filter_paths(etcd_certs_to_check):
- with open(etcd_cert, 'r') as fp:
+ with io.open(etcd_cert, 'r', encoding='utf-8') as fp:
c = fp.read()
(cert_subject,
cert_expiry_date,
@@ -697,7 +671,7 @@ an OpenShift Container Platform cluster
# Now the embedded etcd
######################################################################
try:
- with open('/etc/origin/master/master-config.yaml', 'r') as fp:
+ with io.open('/etc/origin/master/master-config.yaml', 'r', encoding='utf-8') as fp:
cfg = yaml.load(fp)
except IOError:
# Not present
@@ -864,10 +838,5 @@ an OpenShift Container Platform cluster
)
-######################################################################
-# It's just the way we do things in Ansible. So disable this warning
-#
-# pylint: disable=wrong-import-position,import-error
-from ansible.module_utils.basic import AnsibleModule # noqa: E402
if __name__ == '__main__':
main()
diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/openshift_certificate_expiry/test/conftest.py
new file mode 100644
index 000000000..4ca35ecbc
--- /dev/null
+++ b/roles/openshift_certificate_expiry/test/conftest.py
@@ -0,0 +1,116 @@
+# pylint: disable=missing-docstring,invalid-name,redefined-outer-name
+import pytest
+from OpenSSL import crypto
+
+# Parameter list for valid_cert fixture
+VALID_CERTIFICATE_PARAMS = [
+ {
+ 'short_name': 'client',
+ 'cn': 'client.example.com',
+ 'serial': 4,
+ 'uses': b'clientAuth',
+ 'dns': [],
+ 'ip': [],
+ },
+ {
+ 'short_name': 'server',
+ 'cn': 'server.example.com',
+ 'serial': 5,
+ 'uses': b'serverAuth',
+ 'dns': ['kubernetes', 'openshift'],
+ 'ip': ['10.0.0.1', '192.168.0.1']
+ },
+ {
+ 'short_name': 'combined',
+ 'cn': 'combined.example.com',
+ 'serial': 6,
+ 'uses': b'clientAuth, serverAuth',
+ 'dns': ['etcd'],
+ 'ip': ['10.0.0.2', '192.168.0.2']
+ }
+]
+
+# Extract the short_name from VALID_CERTIFICATE_PARAMS to provide
+# friendly naming for the valid_cert fixture
+VALID_CERTIFICATE_IDS = [param['short_name'] for param in VALID_CERTIFICATE_PARAMS]
+
+
+@pytest.fixture(scope='session')
+def ca(tmpdir_factory):
+ ca_dir = tmpdir_factory.mktemp('ca')
+
+ key = crypto.PKey()
+ key.generate_key(crypto.TYPE_RSA, 2048)
+
+ cert = crypto.X509()
+ cert.set_version(3)
+ cert.set_serial_number(1)
+ cert.get_subject().commonName = 'test-signer'
+ cert.gmtime_adj_notBefore(0)
+ cert.gmtime_adj_notAfter(24 * 60 * 60)
+ cert.set_issuer(cert.get_subject())
+ cert.set_pubkey(key)
+ cert.add_extensions([
+ crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE, pathlen:0'),
+ crypto.X509Extension(b'keyUsage', True,
+ b'digitalSignature, keyEncipherment, keyCertSign, cRLSign'),
+ crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=cert)
+ ])
+ cert.add_extensions([
+ crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=cert)
+ ])
+ cert.sign(key, 'sha256')
+
+ return {
+ 'dir': ca_dir,
+ 'key': key,
+ 'cert': cert,
+ }
+
+
+@pytest.fixture(scope='session',
+ ids=VALID_CERTIFICATE_IDS,
+ params=VALID_CERTIFICATE_PARAMS)
+def valid_cert(request, ca):
+ common_name = request.param['cn']
+
+ key = crypto.PKey()
+ key.generate_key(crypto.TYPE_RSA, 2048)
+
+ cert = crypto.X509()
+ cert.set_serial_number(request.param['serial'])
+ cert.gmtime_adj_notBefore(0)
+ cert.gmtime_adj_notAfter(24 * 60 * 60)
+ cert.set_issuer(ca['cert'].get_subject())
+ cert.set_pubkey(key)
+ cert.set_version(3)
+ cert.get_subject().commonName = common_name
+ cert.add_extensions([
+ crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE'),
+ crypto.X509Extension(b'keyUsage', True, b'digitalSignature, keyEncipherment'),
+ crypto.X509Extension(b'extendedKeyUsage', False, request.param['uses']),
+ ])
+
+ if request.param['dns'] or request.param['ip']:
+ san_list = ['DNS:{}'.format(common_name)]
+ san_list.extend(['DNS:{}'.format(x) for x in request.param['dns']])
+ san_list.extend(['IP:{}'.format(x) for x in request.param['ip']])
+
+ cert.add_extensions([
+ crypto.X509Extension(b'subjectAltName', False, ', '.join(san_list).encode('utf8'))
+ ])
+ cert.sign(ca['key'], 'sha256')
+
+ cert_contents = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
+ cert_file = ca['dir'].join('{}.crt'.format(common_name))
+ cert_file.write_binary(cert_contents)
+
+ return {
+ 'common_name': common_name,
+ 'serial': request.param['serial'],
+ 'dns': request.param['dns'],
+ 'ip': request.param['ip'],
+ 'uses': request.param['uses'],
+ 'cert_file': cert_file,
+ 'cert': cert
+ }
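
Because both fixtures are session-scoped, one CA is generated and reused across
every parameterized certificate. A hypothetical test consuming the two fixtures
together might assert the chain of trust:

    def test_issuer(valid_cert, ca):
        # Every generated cert is signed by the session CA ('test-signer').
        issuer = valid_cert['cert'].get_issuer().commonName
        assert issuer == ca['cert'].get_subject().commonName == 'test-signer'
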
diff --git a/roles/openshift_certificate_expiry/test/master.server.crt b/roles/openshift_certificate_expiry/test/master.server.crt
deleted file mode 100644
index 51aa85c8c..000000000
--- a/roles/openshift_certificate_expiry/test/master.server.crt
+++ /dev/null
@@ -1,42 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID7zCCAtegAwIBAgIBBDANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
-c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxMjM5WhcNMTkwMjA3
-MTgxMjQwWjAVMRMwEQYDVQQDEwoxNzIuMzAuMC4xMIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEA44n6kVlnRXSwgnKhXX7JrRvxZm+nCqEE/vpKRfNtrMDP
-AuVtcLUWdEDdT0L7QdceLTCBFe7VugrfokPhVi0XavrC2xFpYJ6+wPpuo7HyBRhf
-z/8rOxftAnMeFU5JhFDaeLwSbDjiRgjE1ZYYz8Hcq9YlPujptD6j6YaW1Inae+Vs
-QKXc1uAobemhClLKazEzccVGu53CaSHe4kJoKUZwJ8Ujt/nRHUr+wekbkpx0NfmF
-UEGgNRXN46cq7ZwkLHsjtuR2pttC6JhF+KHgXTRyWM9ssfvL2McmhTFxrToAlhsq
-8MuHMn0y9DMzmAK6EntvlC5AscxTRljtwHZEicspFwIDAQABo4IBNzCCATMwDgYD
-VR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAw
-gf0GA1UdEQSB9TCB8oIKa3ViZXJuZXRlc4ISa3ViZXJuZXRlcy5kZWZhdWx0ghZr
-dWJlcm5ldGVzLmRlZmF1bHQuc3ZjgiRrdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNs
-dXN0ZXIubG9jYWyCD20wMS5leGFtcGxlLmNvbYIJb3BlbnNoaWZ0ghFvcGVuc2hp
-ZnQuZGVmYXVsdIIVb3BlbnNoaWZ0LmRlZmF1bHQuc3ZjgiNvcGVuc2hpZnQuZGVm
-YXVsdC5zdmMuY2x1c3Rlci5sb2NhbIIKMTcyLjMwLjAuMYIPMTkyLjE2OC4xMjIu
-MjQxhwSsHgABhwTAqHrxMA0GCSqGSIb3DQEBCwUAA4IBAQDSdKBpUVB5Sgn1JB//
-bk804+zrUf01koaT83/17WMI+zG8IOwCZ9Be5+zDe4ThXH+PQC6obbwUi9dn8SN6
-rlihvrhNvAJaknY1YRjW07L7aus2RFKXpzsLuWoWLVlLXBTpmfWhQ2w40bCo4Kri
-jQqvezBQ+u1otFzozWmF7nrI/JK+7o89hLvaobx+mDj5wCPQLO+cM/q11Jcz3htv
-VOTFsMh2VnuKOxZqLGJz2CXkr6YXvAhJiFQWaRCnJEaA2ogTYbDahV5ixFKwqpGZ
-o+yDEroPlCw54Bxs0P1ewUx4TRsqd+Qzhnr73xiFBQ0M7JjhKHF6EczHt87XPvsn
-HEL2
------END CERTIFICATE-----
------BEGIN CERTIFICATE-----
-MIIC6jCCAdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
-c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxMjM3WhcNMjIwMjA2
-MTgxMjM4WjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE0ODY0OTExNTgw
-ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDdyU8AD7sTXHP5jk04i1HY
-cmUXuSiXdIByeIAtTZqiHU46Od0qnZib7tY1NSbo9FtGRl5YEvrfNL+1ig0hZjDh
-gKZK4fNbsanuKgj2SWx11bt4yJH0YSbm9+H45y0E15IY1h30jGHnHFNFZDdYwxtO
-8u+GShb4MOqZL9aUEfvfaoGJIIpfR+eW5NaBZQr6tiM89Z1wJYqoqzgzI/XIyUXR
-zWLOayP1M/eeSXPvBncwZfTPLzphZjB2rz3MdloPrdYMm2b5tfbEOjD7L2aYOJJU
-nVSkgjoFXAazL8KuXSIGcdrdDecyJ4ta8ijD4VIZRN9PnBlYiKaz0DsagkGjUVRd
-AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
-SIb3DQEBCwUAA4IBAQAZ/Kerb5eJXbByQ29fq60V+MQgLIJ1iTo3+zfaXxGlmV9v
-fTp3S1xQhdGyhww7UC0Ze940eRq6BQ5/I6qPcgSGNpUB064pnjSf0CexCY4qoGqK
-4VSvHRrG9TP5V+YIlX9UR1zuPy//a+wuCwKaqiWedTMb4jpvj5jsEOGIrciSmurg
-/9nKvvJXRbgqRYQeJGLT5QW5clHywsyTrE7oYytYSEcAvEs3UZT37H74wj2RFxk6
-KcEzsxUB3W+iYst0QdOPByt64OCwAaUJ96VJstaOYMmyWSShAxGAKDSjcrr4JJnF
-KtqOC1K56x0ONuBsY4MB15TNGPp8SbOhVV6OfIWj
------END CERTIFICATE-----
diff --git a/roles/openshift_certificate_expiry/test/master.server.crt.txt b/roles/openshift_certificate_expiry/test/master.server.crt.txt
deleted file mode 100644
index 6b3c8fb03..000000000
--- a/roles/openshift_certificate_expiry/test/master.server.crt.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number: 4 (0x4)
- Signature Algorithm: sha256WithRSAEncryption
- Issuer: CN=openshift-signer@1486491158
- Validity
- Not Before: Feb 7 18:12:39 2017 GMT
- Not After : Feb 7 18:12:40 2019 GMT
- Subject: CN=172.30.0.1
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- Public-Key: (2048 bit)
- Modulus:
- 00:e3:89:fa:91:59:67:45:74:b0:82:72:a1:5d:7e:
- c9:ad:1b:f1:66:6f:a7:0a:a1:04:fe:fa:4a:45:f3:
- 6d:ac:c0:cf:02:e5:6d:70:b5:16:74:40:dd:4f:42:
- fb:41:d7:1e:2d:30:81:15:ee:d5:ba:0a:df:a2:43:
- e1:56:2d:17:6a:fa:c2:db:11:69:60:9e:be:c0:fa:
- 6e:a3:b1:f2:05:18:5f:cf:ff:2b:3b:17:ed:02:73:
- 1e:15:4e:49:84:50:da:78:bc:12:6c:38:e2:46:08:
- c4:d5:96:18:cf:c1:dc:ab:d6:25:3e:e8:e9:b4:3e:
- a3:e9:86:96:d4:89:da:7b:e5:6c:40:a5:dc:d6:e0:
- 28:6d:e9:a1:0a:52:ca:6b:31:33:71:c5:46:bb:9d:
- c2:69:21:de:e2:42:68:29:46:70:27:c5:23:b7:f9:
- d1:1d:4a:fe:c1:e9:1b:92:9c:74:35:f9:85:50:41:
- a0:35:15:cd:e3:a7:2a:ed:9c:24:2c:7b:23:b6:e4:
- 76:a6:db:42:e8:98:45:f8:a1:e0:5d:34:72:58:cf:
- 6c:b1:fb:cb:d8:c7:26:85:31:71:ad:3a:00:96:1b:
- 2a:f0:cb:87:32:7d:32:f4:33:33:98:02:ba:12:7b:
- 6f:94:2e:40:b1:cc:53:46:58:ed:c0:76:44:89:cb:
- 29:17
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Key Usage: critical
- Digital Signature, Key Encipherment
- X509v3 Extended Key Usage:
- TLS Web Server Authentication
- X509v3 Basic Constraints: critical
- CA:FALSE
- X509v3 Subject Alternative Name:
- DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.122.241, IP Address:172.30.0.1, IP Address:192.168.122.241
- Signature Algorithm: sha256WithRSAEncryption
- d2:74:a0:69:51:50:79:4a:09:f5:24:1f:ff:6e:4f:34:e3:ec:
- eb:51:fd:35:92:86:93:f3:7f:f5:ed:63:08:fb:31:bc:20:ec:
- 02:67:d0:5e:e7:ec:c3:7b:84:e1:5c:7f:8f:40:2e:a8:6d:bc:
- 14:8b:d7:67:f1:23:7a:ae:58:a1:be:b8:4d:bc:02:5a:92:76:
- 35:61:18:d6:d3:b2:fb:6a:eb:36:44:52:97:a7:3b:0b:b9:6a:
- 16:2d:59:4b:5c:14:e9:99:f5:a1:43:6c:38:d1:b0:a8:e0:aa:
- e2:8d:0a:af:7b:30:50:fa:ed:68:b4:5c:e8:cd:69:85:ee:7a:
- c8:fc:92:be:ee:8f:3d:84:bb:da:a1:bc:7e:98:38:f9:c0:23:
- d0:2c:ef:9c:33:fa:b5:d4:97:33:de:1b:6f:54:e4:c5:b0:c8:
- 76:56:7b:8a:3b:16:6a:2c:62:73:d8:25:e4:af:a6:17:bc:08:
- 49:88:54:16:69:10:a7:24:46:80:da:88:13:61:b0:da:85:5e:
- 62:c4:52:b0:aa:91:99:a3:ec:83:12:ba:0f:94:2c:39:e0:1c:
- 6c:d0:fd:5e:c1:4c:78:4d:1b:2a:77:e4:33:86:7a:fb:df:18:
- 85:05:0d:0c:ec:98:e1:28:71:7a:11:cc:c7:b7:ce:d7:3e:fb:
- 27:1c:42:f6
------BEGIN CERTIFICATE-----
-MIID7zCCAtegAwIBAgIBBDANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
-c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxMjM5WhcNMTkwMjA3
-MTgxMjQwWjAVMRMwEQYDVQQDEwoxNzIuMzAuMC4xMIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEA44n6kVlnRXSwgnKhXX7JrRvxZm+nCqEE/vpKRfNtrMDP
-AuVtcLUWdEDdT0L7QdceLTCBFe7VugrfokPhVi0XavrC2xFpYJ6+wPpuo7HyBRhf
-z/8rOxftAnMeFU5JhFDaeLwSbDjiRgjE1ZYYz8Hcq9YlPujptD6j6YaW1Inae+Vs
-QKXc1uAobemhClLKazEzccVGu53CaSHe4kJoKUZwJ8Ujt/nRHUr+wekbkpx0NfmF
-UEGgNRXN46cq7ZwkLHsjtuR2pttC6JhF+KHgXTRyWM9ssfvL2McmhTFxrToAlhsq
-8MuHMn0y9DMzmAK6EntvlC5AscxTRljtwHZEicspFwIDAQABo4IBNzCCATMwDgYD
-VR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAw
-gf0GA1UdEQSB9TCB8oIKa3ViZXJuZXRlc4ISa3ViZXJuZXRlcy5kZWZhdWx0ghZr
-dWJlcm5ldGVzLmRlZmF1bHQuc3ZjgiRrdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNs
-dXN0ZXIubG9jYWyCD20wMS5leGFtcGxlLmNvbYIJb3BlbnNoaWZ0ghFvcGVuc2hp
-ZnQuZGVmYXVsdIIVb3BlbnNoaWZ0LmRlZmF1bHQuc3ZjgiNvcGVuc2hpZnQuZGVm
-YXVsdC5zdmMuY2x1c3Rlci5sb2NhbIIKMTcyLjMwLjAuMYIPMTkyLjE2OC4xMjIu
-MjQxhwSsHgABhwTAqHrxMA0GCSqGSIb3DQEBCwUAA4IBAQDSdKBpUVB5Sgn1JB//
-bk804+zrUf01koaT83/17WMI+zG8IOwCZ9Be5+zDe4ThXH+PQC6obbwUi9dn8SN6
-rlihvrhNvAJaknY1YRjW07L7aus2RFKXpzsLuWoWLVlLXBTpmfWhQ2w40bCo4Kri
-jQqvezBQ+u1otFzozWmF7nrI/JK+7o89hLvaobx+mDj5wCPQLO+cM/q11Jcz3htv
-VOTFsMh2VnuKOxZqLGJz2CXkr6YXvAhJiFQWaRCnJEaA2ogTYbDahV5ixFKwqpGZ
-o+yDEroPlCw54Bxs0P1ewUx4TRsqd+Qzhnr73xiFBQ0M7JjhKHF6EczHt87XPvsn
-HEL2
------END CERTIFICATE-----
diff --git a/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt b/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt
deleted file mode 100644
index cd13ddc38..000000000
--- a/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDEzCCAfugAwIBAgIBCzANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
-c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxOTM0WhcNMTkwMjA3
-MTgxOTM1WjA9MRUwEwYDVQQKEwxzeXN0ZW06bm9kZXMxJDAiBgNVBAMTG3N5c3Rl
-bTpub2RlOm0wMS5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOcVdDaSmeXuSp+7VCHUjEDeTP3j9aH0nreBj3079sEzethlLoQmwAqf
-CZp23qXGYm0R89+CC55buaH1FN/ltQ8QDGUzi4tdow9Af/0OcD0EINO2ukmyG5/9
-N+X905mo+y923wppvrchAA6AcxxeDyA63zouGS4exI98iuZlcdS48zbsGERkRPGg
-hoGCo7HoiKtUNL5X8MYibnFYnA4EUngdHZsRKuKte4t8GY4PYq4cxIOYXsJsNmT5
-mkFy4ThGFfR9IGg/VfyeWIkRe2VWyaUgzL0gHytAhlRJ9l54ynx96YEWrjCtp/kh
-d3KeVj0IUcMzvoXX5hipYUPkoezcxI8CAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgWg
-MBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQEL
-BQADggEBAM1jexLtuOOTsOPfEal/ICzfP9aX3m0R/yGPjwQv43jOc81NcL5d+CeD
-MX36tKAxFIe+wvXo0kUQOzTK3D7ol4x2YTtB4uDzNE5tVh5dWi2LrKYSqZDIrhKO
-MOmJRWR3AFEopaaGQpxsD/FSfZ5Mg0OMMBPHABxMrsiserHO1nh4ax3+SI0i7Jen
-gVsB4B/Xxg9Lw9JDX3/XMcI+fyLVw5ctO62BaluljpT+HkdbRWnH8ar7TmcJjzTo
-/TyXOeOLkbozx0BQK16d/CbtLomJ+PO4cdwCNs2Z6HGSPTL7S9y0pct52N0kfJfx
-ZGXMsW+N62S2vVSXEekMR0GJgJnLNSo=
------END CERTIFICATE-----
diff --git a/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt.txt b/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt.txt
deleted file mode 100644
index 67a3eb81c..000000000
--- a/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number: 11 (0xb)
- Signature Algorithm: sha256WithRSAEncryption
- Issuer: CN=openshift-signer@1486491158
- Validity
- Not Before: Feb 7 18:19:34 2017 GMT
- Not After : Feb 7 18:19:35 2019 GMT
- Subject: O=system:nodes, CN=system:node:m01.example.com
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- Public-Key: (2048 bit)
- Modulus:
- 00:e7:15:74:36:92:99:e5:ee:4a:9f:bb:54:21:d4:
- 8c:40:de:4c:fd:e3:f5:a1:f4:9e:b7:81:8f:7d:3b:
- f6:c1:33:7a:d8:65:2e:84:26:c0:0a:9f:09:9a:76:
- de:a5:c6:62:6d:11:f3:df:82:0b:9e:5b:b9:a1:f5:
- 14:df:e5:b5:0f:10:0c:65:33:8b:8b:5d:a3:0f:40:
- 7f:fd:0e:70:3d:04:20:d3:b6:ba:49:b2:1b:9f:fd:
- 37:e5:fd:d3:99:a8:fb:2f:76:df:0a:69:be:b7:21:
- 00:0e:80:73:1c:5e:0f:20:3a:df:3a:2e:19:2e:1e:
- c4:8f:7c:8a:e6:65:71:d4:b8:f3:36:ec:18:44:64:
- 44:f1:a0:86:81:82:a3:b1:e8:88:ab:54:34:be:57:
- f0:c6:22:6e:71:58:9c:0e:04:52:78:1d:1d:9b:11:
- 2a:e2:ad:7b:8b:7c:19:8e:0f:62:ae:1c:c4:83:98:
- 5e:c2:6c:36:64:f9:9a:41:72:e1:38:46:15:f4:7d:
- 20:68:3f:55:fc:9e:58:89:11:7b:65:56:c9:a5:20:
- cc:bd:20:1f:2b:40:86:54:49:f6:5e:78:ca:7c:7d:
- e9:81:16:ae:30:ad:a7:f9:21:77:72:9e:56:3d:08:
- 51:c3:33:be:85:d7:e6:18:a9:61:43:e4:a1:ec:dc:
- c4:8f
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Key Usage: critical
- Digital Signature, Key Encipherment
- X509v3 Extended Key Usage:
- TLS Web Client Authentication
- X509v3 Basic Constraints: critical
- CA:FALSE
- Signature Algorithm: sha256WithRSAEncryption
- cd:63:7b:12:ed:b8:e3:93:b0:e3:df:11:a9:7f:20:2c:df:3f:
- d6:97:de:6d:11:ff:21:8f:8f:04:2f:e3:78:ce:73:cd:4d:70:
- be:5d:f8:27:83:31:7d:fa:b4:a0:31:14:87:be:c2:f5:e8:d2:
- 45:10:3b:34:ca:dc:3e:e8:97:8c:76:61:3b:41:e2:e0:f3:34:
- 4e:6d:56:1e:5d:5a:2d:8b:ac:a6:12:a9:90:c8:ae:12:8e:30:
- e9:89:45:64:77:00:51:28:a5:a6:86:42:9c:6c:0f:f1:52:7d:
- 9e:4c:83:43:8c:30:13:c7:00:1c:4c:ae:c8:ac:7a:b1:ce:d6:
- 78:78:6b:1d:fe:48:8d:22:ec:97:a7:81:5b:01:e0:1f:d7:c6:
- 0f:4b:c3:d2:43:5f:7f:d7:31:c2:3e:7f:22:d5:c3:97:2d:3b:
- ad:81:6a:5b:a5:8e:94:fe:1e:47:5b:45:69:c7:f1:aa:fb:4e:
- 67:09:8f:34:e8:fd:3c:97:39:e3:8b:91:ba:33:c7:40:50:2b:
- 5e:9d:fc:26:ed:2e:89:89:f8:f3:b8:71:dc:02:36:cd:99:e8:
- 71:92:3d:32:fb:4b:dc:b4:a5:cb:79:d8:dd:24:7c:97:f1:64:
- 65:cc:b1:6f:8d:eb:64:b6:bd:54:97:11:e9:0c:47:41:89:80:
- 99:cb:35:2a
------BEGIN CERTIFICATE-----
-MIIDEzCCAfugAwIBAgIBCzANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
-c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxOTM0WhcNMTkwMjA3
-MTgxOTM1WjA9MRUwEwYDVQQKEwxzeXN0ZW06bm9kZXMxJDAiBgNVBAMTG3N5c3Rl
-bTpub2RlOm0wMS5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOcVdDaSmeXuSp+7VCHUjEDeTP3j9aH0nreBj3079sEzethlLoQmwAqf
-CZp23qXGYm0R89+CC55buaH1FN/ltQ8QDGUzi4tdow9Af/0OcD0EINO2ukmyG5/9
-N+X905mo+y923wppvrchAA6AcxxeDyA63zouGS4exI98iuZlcdS48zbsGERkRPGg
-hoGCo7HoiKtUNL5X8MYibnFYnA4EUngdHZsRKuKte4t8GY4PYq4cxIOYXsJsNmT5
-mkFy4ThGFfR9IGg/VfyeWIkRe2VWyaUgzL0gHytAhlRJ9l54ynx96YEWrjCtp/kh
-d3KeVj0IUcMzvoXX5hipYUPkoezcxI8CAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgWg
-MBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQEL
-BQADggEBAM1jexLtuOOTsOPfEal/ICzfP9aX3m0R/yGPjwQv43jOc81NcL5d+CeD
-MX36tKAxFIe+wvXo0kUQOzTK3D7ol4x2YTtB4uDzNE5tVh5dWi2LrKYSqZDIrhKO
-MOmJRWR3AFEopaaGQpxsD/FSfZ5Mg0OMMBPHABxMrsiserHO1nh4ax3+SI0i7Jen
-gVsB4B/Xxg9Lw9JDX3/XMcI+fyLVw5ctO62BaluljpT+HkdbRWnH8ar7TmcJjzTo
-/TyXOeOLkbozx0BQK16d/CbtLomJ+PO4cdwCNs2Z6HGSPTL7S9y0pct52N0kfJfx
-ZGXMsW+N62S2vVSXEekMR0GJgJnLNSo=
------END CERTIFICATE-----
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
index 2e245191f..ccdd48fa8 100644
--- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
+++ b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
@@ -1,82 +1,89 @@
-#!/usr/bin/env python
'''
Unit tests for the FakeOpenSSL classes
'''
-
import os
+import subprocess
import sys
-import unittest
+
import pytest
-# Disable import-error b/c our libraries aren't loaded in jenkins
-# pylint: disable=import-error,wrong-import-position
-# place class in our python path
-module_path = os.path.join('/'.join(os.path.realpath(__file__).split(os.path.sep)[:-1]), 'library')
-sys.path.insert(0, module_path)
-openshift_cert_expiry = pytest.importorskip("openshift_cert_expiry")
+MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
+sys.path.insert(1, MODULE_PATH)
+
+# pylint: disable=import-error,wrong-import-position,missing-docstring
+# pylint: disable=invalid-name,redefined-outer-name
+from openshift_cert_expiry import FakeOpenSSLCertificate # noqa: E402
+
+
+@pytest.fixture(scope='module')
+def fake_valid_cert(valid_cert):
+ cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text']
+ cert = subprocess.check_output(cmd)
+ return FakeOpenSSLCertificate(cert.decode('utf8'))
+
+def test_not_after(valid_cert, fake_valid_cert):
+    ''' Validate the value returned from get_notAfter() '''
+ real_cert = valid_cert['cert']
-@pytest.mark.skip('Skipping all tests because of unresolved import errors')
-class TestFakeOpenSSLClasses(unittest.TestCase):
- '''
- Test class for FakeOpenSSL classes
- '''
+ # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
+ # is text, so decode the result from pyOpenSSL prior to comparing
+ assert real_cert.get_notAfter().decode('utf8') == fake_valid_cert.get_notAfter()
- def setUp(self):
- ''' setup method for other tests '''
- with open('test/system-node-m01.example.com.crt.txt', 'r') as fp:
- self.cert_string = fp.read()
- self.fake_cert = openshift_cert_expiry.FakeOpenSSLCertificate(self.cert_string)
+def test_serial(valid_cert, fake_valid_cert):
+    ''' Validate the value returned from get_serial_number() '''
+ real_cert = valid_cert['cert']
+ assert real_cert.get_serial_number() == fake_valid_cert.get_serial_number()
- with open('test/master.server.crt.txt', 'r') as fp:
- self.cert_san_string = fp.read()
- self.fake_san_cert = openshift_cert_expiry.FakeOpenSSLCertificate(self.cert_san_string)
+def test_get_subject(valid_cert, fake_valid_cert):
+ ''' Validate the certificate subject '''
- def test_FakeOpenSSLCertificate_get_serial_number(self):
- """We can read the serial number from the cert"""
- self.assertEqual(11, self.fake_cert.get_serial_number())
+    # Gather the subject components into a comma-separated string of name:value pairs.
+ # Since the internal representation of pyOpenSSL uses bytes, we need to decode
+ # the results before comparing.
+ c_subjects = valid_cert['cert'].get_subject().get_components()
+ c_subj = ', '.join(['{}:{}'.format(x.decode('utf8'), y.decode('utf8')) for x, y in c_subjects])
+ f_subjects = fake_valid_cert.get_subject().get_components()
+ f_subj = ', '.join(['{}:{}'.format(x, y) for x, y in f_subjects])
+ assert c_subj == f_subj
- def test_FakeOpenSSLCertificate_get_notAfter(self):
- """We can read the cert expiry date"""
- expiry = self.fake_cert.get_notAfter()
- self.assertEqual('20190207181935Z', expiry)
- def test_FakeOpenSSLCertificate_get_sans(self):
- """We can read Subject Alt Names from a cert"""
- ext = self.fake_san_cert.get_extension(0)
+def get_san_extension(cert):
+ # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
+ # is text, so we need to set the value to search for accordingly.
+ if isinstance(cert, FakeOpenSSLCertificate):
+ san_short_name = 'subjectAltName'
+ else:
+ san_short_name = b'subjectAltName'
- if ext.get_short_name() == 'subjectAltName':
- sans = str(ext)
+ for i in range(cert.get_extension_count()):
+ ext = cert.get_extension(i)
+ if ext.get_short_name() == san_short_name:
+ # return the string representation to compare the actual SAN
+ # values instead of the data types
+ return str(ext)
- self.assertEqual('DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.122.241, IP Address:172.30.0.1, IP Address:192.168.122.241', sans)
+ return None
- def test_FakeOpenSSLCertificate_get_sans_no_sans(self):
- """We can tell when there are no Subject Alt Names in a cert"""
- with self.assertRaises(IndexError):
- self.fake_cert.get_extension(0)
- def test_FakeOpenSSLCertificate_get_subject(self):
- """We can read the Subject from a cert"""
- # Subject: O=system:nodes, CN=system:node:m01.example.com
- subject = self.fake_cert.get_subject()
- subjects = []
- for name, value in subject.get_components():
- subjects.append('{}={}'.format(name, value))
+def test_subject_alt_names(valid_cert, fake_valid_cert):
+ real_cert = valid_cert['cert']
- self.assertEqual('O=system:nodes, CN=system:node:m01.example.com', ', '.join(subjects))
+ san = get_san_extension(real_cert)
+ f_san = get_san_extension(fake_valid_cert)
- def test_FakeOpenSSLCertificate_get_subject_san_cert(self):
- """We can read the Subject from a cert with sans"""
- # Subject: O=system:nodes, CN=system:node:m01.example.com
- subject = self.fake_san_cert.get_subject()
- subjects = []
- for name, value in subject.get_components():
- subjects.append('{}={}'.format(name, value))
+ assert san == f_san
- self.assertEqual('CN=172.30.0.1', ', '.join(subjects))
+    # If any DNS or IP SANs are defined, verify the common_name is present
+ if valid_cert['ip'] or valid_cert['dns']:
+ assert 'DNS:' + valid_cert['common_name'] in f_san
+ # Verify all ip sans are present
+ for ip in valid_cert['ip']:
+ assert 'IP Address:' + ip in f_san
-if __name__ == "__main__":
- unittest.main()
+ # Verify all dns sans are present
+ for name in valid_cert['dns']:
+ assert 'DNS:' + name in f_san
diff --git a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py b/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py
new file mode 100644
index 000000000..98792e2ee
--- /dev/null
+++ b/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py
@@ -0,0 +1,67 @@
+'''
+ Unit tests for the load_and_handle_cert method
+'''
+import datetime
+import os
+import sys
+
+import pytest
+
+MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
+sys.path.insert(1, MODULE_PATH)
+
+# pylint: disable=import-error,wrong-import-position,missing-docstring
+# pylint: disable=invalid-name,redefined-outer-name
+import openshift_cert_expiry # noqa: E402
+
+# TODO: More testing on the results of the load_and_handle_cert function
+# could be implemented here as well, such as verifying subjects
+# match up.
+
+
+@pytest.fixture(params=['OpenSSLCertificate', 'FakeOpenSSLCertificate'])
+def loaded_cert(request, valid_cert):
+ """ parameterized fixture to provide load_and_handle_cert results
+ for both OpenSSL and FakeOpenSSL parsed certificates
+ """
+ now = datetime.datetime.now()
+
+ openshift_cert_expiry.HAS_OPENSSL = request.param == 'OpenSSLCertificate'
+
+ # valid_cert['cert_file'] is a `py.path.LocalPath` object and
+ # provides a read_text() method for reading the file contents.
+ cert_string = valid_cert['cert_file'].read_text('utf8')
+
+ (subject,
+ expiry_date,
+ time_remaining,
+ serial) = openshift_cert_expiry.load_and_handle_cert(cert_string, now)
+
+ return {
+ 'now': now,
+ 'subject': subject,
+ 'expiry_date': expiry_date,
+ 'time_remaining': time_remaining,
+ 'serial': serial,
+ }
+
+
+def test_serial(loaded_cert, valid_cert):
+ """Params:
+
+ * `loaded_cert` comes from the `loaded_cert` fixture in this file
+ * `valid_cert` comes from the 'valid_cert' fixture in conftest.py
+ """
+ valid_cert_serial = valid_cert['cert'].get_serial_number()
+ assert loaded_cert['serial'] == valid_cert_serial
+
+
+def test_expiry(loaded_cert):
+ """Params:
+
+ * `loaded_cert` comes from the `loaded_cert` fixture in this file
+ """
+ expiry_date = loaded_cert['expiry_date']
+ time_remaining = loaded_cert['time_remaining']
+ now = loaded_cert['now']
+ assert expiry_date == now + time_remaining
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 0a476ac26..e55c288a8 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -12,6 +12,18 @@
when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
- fail:
+    msg: Contiv cannot be used with the OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Contiv
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
+
+- fail:
+    msg: Contiv cannot be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+- fail:
+    msg: Contiv cannot be used with nuage
+ when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+- fail:
msg: openshift_hostname must be 64 characters or less
when: openshift_hostname is defined and openshift_hostname | length > 64
@@ -24,6 +36,7 @@
sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
use_flannel: "{{ openshift_use_flannel | default(None) }}"
use_nuage: "{{ openshift_use_nuage | default(None) }}"
+ use_contiv: "{{ openshift_use_contiv | default(None) }}"
use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
data_dir: "{{ openshift_data_dir | default(None) }}"
use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}"
diff --git a/roles/openshift_serviceaccounts/meta/main.yml b/roles/openshift_etcd_ca/meta/main.yml
index 7a30c220f..d73d27356 100644
--- a/roles/openshift_serviceaccounts/meta/main.yml
+++ b/roles/openshift_etcd_ca/meta/main.yml
@@ -1,16 +1,17 @@
---
galaxy_info:
- author: OpenShift Operations
- description: OpenShift Service Accounts
+ author: Tim Bielawa
+ description: Meta role around the etcd_ca role
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.9
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
- 7
categories:
- cloud
+ - system
dependencies:
-- { role: openshift_facts }
-- { role: lib_openshift }
+- role: openshift_etcd_facts
+- role: etcd_ca
diff --git a/roles/openshift_etcd_ca/tasks/main.yml b/roles/openshift_etcd_ca/tasks/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_etcd_ca/tasks/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 65e0e2a5a..e3cc3a9b4 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -15,16 +15,21 @@ TEMP=`mktemp -d`
pushd $TEMP
wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip
+wget https://github.com/jboss-fuse/application-templates/archive/GA.zip -O fis-GA.zip
wget https://github.com/jboss-openshift/application-templates/archive/${XPAAS_VERSION}.zip -O application-templates-master.zip
unzip origin-master.zip
unzip application-templates-master.zip
-cp origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
-cp origin-master/examples/quickstarts/* ${EXAMPLES_BASE}/quickstart-templates/
-cp origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
-cp origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/
+unzip fis-GA.zip
+mv origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
+mv origin-master/examples/quickstarts/* ${EXAMPLES_BASE}/quickstart-templates/
+mv origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
+mv origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/
mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/
+# fis content from jboss-fuse/application-templates-GA would collide with jboss-openshift/application-templates
+# as soon as they use the same branch/tag names
+mv application-templates-GA/fis-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
+mv application-templates-GA/quickstarts/* ${EXAMPLES_BASE}/xpaas-templates/
find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
-wget https://raw.githubusercontent.com/jboss-fuse/application-templates/GA/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json
wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/metrics-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/metrics-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json
index ed0e94bed..9d99973be 100644
--- a/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json
@@ -20,12 +20,22 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 6.2.1 Java S2I images.",
+ "description": "JBoss Fuse Integration Services 1.0 Java S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Java S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
}
]
}
@@ -42,12 +52,22 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 6.2.1 Karaf S2I images.",
+ "description": "JBoss Fuse Integration Services 1.0 Karaf S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,karaf,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Karaf S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,karaf,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-amq-template.json
new file mode 100644
index 000000000..cd0bec3c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-amq-template.json
@@ -0,0 +1,362 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel route using ActiveMQ in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-amq.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-amq-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-amq",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_USERNAME",
+ "value": "${ACTIVEMQ_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_PASSWORD",
+ "value": "${ACTIVEMQ_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
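
The BUILD_SECRET parameter above relies on the template's "expression" generator,
which expands the pattern [a-zA-Z0-9]{40} once at instantiation time. Roughly
equivalent Python (illustrative only; uses the stdlib secrets module, Python 3.6+):

    import secrets
    import string

    # A 40-character alphanumeric webhook secret, as "[a-zA-Z0-9]{40}" yields.
    alphabet = string.ascii_letters + string.digits
    build_secret = ''.join(secrets.choice(alphabet) for _ in range(40))
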
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-log-template.json
new file mode 100644
index 000000000..2ecce08a9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-log-template.json
@@ -0,0 +1,336 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "A simple Camel route in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-log"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-log"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-log",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-log.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-log-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-log",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
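
A note on the generated parameter above: declaring "generate": "expression" with "from": "[a-zA-Z0-9]{40}" asks the OpenShift template processor to mint a random 40-character webhook secret at instantiation time. A minimal Python sketch of an equivalent generator, useful when pre-creating the secret outside the processor (names here are illustrative):

import secrets
import string

# Alphabet mirroring the template's "from" expression: [a-zA-Z0-9]{40}
ALPHABET = string.ascii_letters + string.digits

def generate_build_secret(length: int = 40) -> str:
    """Produce a value of the same shape OpenShift generates for
    a parameter declared with "generate": "expression"."""
    return "".join(secrets.choice(ALPHABET) for _ in range(length))

print(generate_build_secret())
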
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-rest-sql-template.json
new file mode 100644
index 000000000..d80939efb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-rest-sql-template.json
@@ -0,0 +1,428 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel example using Rest DSL with SQL Database in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-rest-sql.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-rest-sql-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "required": true,
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
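
Once instantiated, the Generic trigger in the BuildConfig above lets any HTTP client start a build by POSTing to the webhook URL derived from the BUILD_SECRET value (`oc start-build` is the simpler day-to-day alternative). A hedged sketch against an origin 1.x master; MASTER_URL, NAMESPACE and the secret value are placeholders, and the URL shape should be verified against your cluster's API version:

import requests

MASTER_URL = "https://master.example.com:8443"    # assumption: origin 1.x API server
NAMESPACE = "myproject"
BUILDCONFIG = "s2i-karaf2-camel-rest-sql"         # BuildConfig name from the template default
BUILD_SECRET = "<generated BUILD_SECRET value>"   # placeholder

# Fire the Generic webhook trigger declared in the BuildConfig.
url = (MASTER_URL + "/oapi/v1/namespaces/" + NAMESPACE +
       "/buildconfigs/" + BUILDCONFIG + "/webhooks/" + BUILD_SECRET + "/generic")
resp = requests.post(url, verify=False)  # test clusters often use self-signed certs
resp.raise_for_status()
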
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-cxf-rest-template.json
new file mode 100644
index 000000000..f99099868
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-cxf-rest-template.json
@@ -0,0 +1,392 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "REST example using CXF in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-cxf-rest"
+ },
+ "labels": {
+ "template": "s2i-karaf2-cxf-rest"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-cxf-rest.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-cxf-rest-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "java",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9412,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
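
Both karaf2 templates probe the container over plain HTTP on port 8181: /readiness-check gates traffic starting 10 seconds after startup, and /health-check restarts the container once it fails after a 180-second grace period. What the kubelet's httpGet check roughly amounts to, as a Python sketch (any 2xx or 3xx status counts as success):

import http.client

def http_probe(host: str, port: int = 8181, path: str = "/readiness-check",
               timeout: float = 1.0) -> bool:
    # Mirrors an httpGet probe: connect, GET the path, accept 2xx/3xx.
    try:
        conn = http.client.HTTPConnection(host, port, timeout=timeout)
        conn.request("GET", path)
        return 200 <= conn.getresponse().status < 400
    except OSError:
        return False

print(http_probe("10.1.2.3"))  # pod IP is illustrative
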
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/openjdk18-web-basic-s2i.json
new file mode 100644
index 000000000..143e16756
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/openjdk18-web-basic-s2i.json
@@ -0,0 +1,267 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for Java applications built using S2I.",
+ "tags": "java,xpaas",
+ "version": "1.0.0"
+ },
+ "name": "openjdk18-web-basic-s2i"
+ },
+ "labels": {
+ "template": "openjdk18-web-basic-s2i",
+ "xpaas": "1.0.0"
+ },
+ "message": "A new java application has been created in your project.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "openjdk-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "undertow-servlet",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The application's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-openjdk18-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "env": [
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
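
Each template is instantiated by replacing every ${PARAM} reference in "objects" with the parameter's supplied value or default. A rough local stand-in for that step in Python -- illustrative only, since it skips "generate": "expression" handling and required-parameter validation, and assumes the template was saved as openjdk18-web-basic-s2i.json:

import json
from string import Template

def process_template(path: str, overrides: dict = None) -> list:
    # Substitute ${PARAM} references the way the template processor would,
    # leaving any unknown references untouched (safe_substitute).
    with open(path) as f:
        tmpl = json.load(f)
    params = {p["name"]: p.get("value", "") for p in tmpl.get("parameters", [])}
    params.update(overrides or {})
    raw = json.dumps(tmpl["objects"])
    return json.loads(Template(raw).safe_substitute(params))

objects = process_template("openjdk18-web-basic-s2i.json",
                           {"APPLICATION_NAME": "demo-app"})
print([o["kind"] for o in objects])  # e.g. ['Service', 'Route', 'ImageStream', ...]
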
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-amq-template.json
new file mode 100644
index 000000000..8b3cd6ed0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-amq-template.json
@@ -0,0 +1,331 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes using OpenShift. In this example we will use two containers, one container to run as a ActiveMQ broker, and another as a client to the broker, where the Camel routes are running. This quickstart requires the ActiveMQ broker has been deployed and running first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-amq.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-amq-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "value": "${ACTIVEMQ_BROKER_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "value": "${ACTIVEMQ_BROKER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
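
The AMQ quickstart passes only the broker's service name (ACTIVEMQ_SERVICE_NAME) into the container. Inside the pod, Kubernetes also exposes every service as <NAME>_SERVICE_HOST and <NAME>_SERVICE_PORT environment variables (upper-cased, dashes mapped to underscores), which is one way a client can resolve the broker address. Sketch:

import os

def service_address(service_name: str) -> tuple:
    # Kubernetes publishes each service as <NAME>_SERVICE_HOST/_PORT,
    # upper-cased and with dashes mapped to underscores.
    key = service_name.upper().replace("-", "_")
    return (os.environ[key + "_SERVICE_HOST"],
            int(os.environ[key + "_SERVICE_PORT"]))

host, port = service_address(os.environ.get("ACTIVEMQ_SERVICE_NAME", "broker-amq-tcp"))
print("tcp://%s:%d" % (host, port))
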
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-config-template.json
new file mode 100644
index 000000000..bc5bbad22
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-config-template.json
@@ -0,0 +1,327 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring-Boot application using Openshift ConfigMaps and Secrets.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-config"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-config"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-config",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-config.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-config-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_ACCOUNT_NAME",
+ "displayName": "Service Account",
+ "value": "qs-camel-config",
+ "required": true,
+ "description": "The Service Account that will be used to run the container. It must be already present in Openshift and have the view role."
+ },
+ {
+ "name": "SECRET_NAME",
+ "displayName": "Secret Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift Secret that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "CONFIGMAP_NAME",
+ "displayName": "ConfigMap Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift ConfigMap that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "volumes": [
+ {
+ "name": "camel-config",
+ "secret": {
+ "secretName": "${SECRET_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_SECRETS_NAME",
+ "value": "${SECRET_NAME}"
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_CONFIG_NAME",
+ "value": "${CONFIGMAP_NAME}"
+ } ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "camel-config",
+ "readOnly": true,
+ "mountPath": "/etc/secrets/camel-config"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
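
The config quickstart is wired up two ways at once: the named Secret is mounted read-only at /etc/secrets/camel-config, and the SPRING_CLOUD_KUBERNETES_SECRETS_NAME / SPRING_CLOUD_KUBERNETES_CONFIG_NAME variables tell Spring Cloud Kubernetes which Secret and ConfigMap to read. A mounted Secret appears as one file per key; reading it back looks like this sketch (the '..'-prefixed entries are the kubelet's atomic-update bookkeeping and are skipped):

import os

def read_mounted_secret(mount_path: str = "/etc/secrets/camel-config") -> dict:
    # Each Secret key is a file whose contents are the value.
    return {name: open(os.path.join(mount_path, name)).read()
            for name in os.listdir(mount_path)
            if not name.startswith("..")}

print(sorted(read_mounted_secret()))
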
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-drools-template.json
new file mode 100644
index 000000000..e54fa0d59
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-drools-template.json
@@ -0,0 +1,334 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create knowledge session via Spring configuration file. Camel routes, defined via Spring as well, are then used to e.g. pass (insert) the Body of the message as a POJO to Drools engine for execution.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-drools"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-drools"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-drools",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-drools.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-drools-1.0.0.redhat-000054",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "KIESERVER_SERVICE",
+ "displayName": "Decision Server Name",
+ "required": true,
+ "value": "kie-app",
+ "description": "Set this to the name of the Decision Server. You may need to create an instance before."
+ },
+ {
+ "name": "KIESERVER_USERNAME",
+ "displayName": "Decision Server Username",
+ "required": true,
+ "value": "kieserver",
+ "description": "The username used to authenticate with the Decision Server."
+ },
+ {
+ "name": "KIESERVER_PASSWORD",
+ "displayName": "Decision Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the Decision Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000054",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "KIESERVER_SERVICE",
+ "value": "${KIESERVER_SERVICE}"
+ }, {
+ "name": "KIESERVER_USERNAME",
+ "value": "${KIESERVER_USERNAME}"
+ }, {
+ "name": "KIESERVER_PASSWORD",
+ "value": "${KIESERVER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
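
The drools quickstart reaches the Decision Server named in KIESERVER_SERVICE and authenticates with KIESERVER_USERNAME/KIESERVER_PASSWORD. A hedged connectivity check: the /kie-server/services/rest/server path follows the KIE Server 6.x REST layout and port 8080 is an assumption here, so adjust both to match your kie-app deployment:

import os
import requests

host = os.environ.get("KIESERVER_SERVICE", "kie-app")
auth = (os.environ.get("KIESERVER_USERNAME", "kieserver"),
        os.environ["KIESERVER_PASSWORD"])

# GET the server info endpoint; 200 means the credentials and service wiring work.
resp = requests.get("http://%s:8080/kie-server/services/rest/server" % host,
                    auth=auth, timeout=5)
print(resp.status_code)
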
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-infinispan-template.json
new file mode 100644
index 000000000..20ba97dac
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-infinispan-template.json
@@ -0,0 +1,315 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-infinispan"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-infinispan"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-infinispan",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-infinispan.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-infinispan-1.0.0.redhat-000024",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "INFINISPAN_SERVICE",
+ "displayName": "JBoss Data Grid Service (Hot Rod)",
+ "required": true,
+ "value": "datagrid-app-hotrod",
+ "description": "Set this to the name of the Hot Rod service of the JBoss Data Grid. You may need to create the data grid first."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000024",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "INFINISPAN_SERVICE",
+ "value": "${INFINISPAN_SERVICE}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
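
The Infinispan quickstart needs only the Hot Rod service name (INFINISPAN_SERVICE). A quick reachability test from inside a pod, using the same service environment variables Kubernetes injects; the 11222 fallback is the conventional Hot Rod port and an assumption here, so check the data grid's actual service definition:

import os
import socket

service = os.environ.get("INFINISPAN_SERVICE", "datagrid-app-hotrod")
key = service.upper().replace("-", "_")
addr = (os.environ[key + "_SERVICE_HOST"],
        int(os.environ.get(key + "_SERVICE_PORT", 11222)))

# Open and immediately close a TCP connection to prove the endpoint is reachable.
with socket.create_connection(addr, timeout=3):
    print("Hot Rod endpoint reachable at %s:%d" % addr)
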
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-rest-sql-template.json
new file mode 100644
index 000000000..555647fab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-rest-sql-template.json
@@ -0,0 +1,403 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. In this example we will use two containers, one container to run as a MySQL server, and another as a client to the database, where the Camel routes are running. This quickstart requires the MySQL server to be deployed and started first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-rest-sql.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "camel-rest-sql",
+ "description": "Exposed service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8080,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
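
BUILD_SECRET is the one parameter in these templates that is neither defaulted nor meant to be caller-supplied: "generate": "expression" with "from": "[a-zA-Z0-9]{40}" tells the template processor to fill it with a random 40-character alphanumeric string. A locally equivalent Python sketch:

    import secrets
    import string

    # Locally equivalent to the processor expanding "[a-zA-Z0-9]{40}".
    ALPHABET = string.ascii_letters + string.digits

    def generate_build_secret(length=40):
        return "".join(secrets.choice(ALPHABET) for _ in range(length))

    print(generate_build_secret())  # 40 random alphanumeric characters
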
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-teiid-template.json
new file mode 100644
index 000000000..cf9a4e903
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-teiid-template.json
@@ -0,0 +1,343 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-teiid"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-teiid"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-teiid",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-teiid.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-teiid-1.0.0.redhat-000053",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "TEIID_SERVICE_NAME",
+ "displayName": "JDV Server Name",
+ "required": true,
+ "value": "datavirt-app",
+ "description": "Set this to the name of the JDV Server. You may need to create an instance before."
+ },
+ {
+ "name": "TEIID_PORT_NAME",
+ "displayName": "JDV Port Name",
+ "value": "jdbc",
+ "description": "Set this to the name of the JDV port to use. Set this value if the JDV service contains multiple named ports."
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "displayName": "JDV Server Username",
+ "required": true,
+ "description": "The username used to authenticate with the JDV Server."
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "displayName": "JDV Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the JDV Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000053",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [
+ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "TEIID_SERVICE_NAME",
+ "value": "${TEIID_SERVICE_NAME}"
+ }, {
+ "name": "TEIID_PORT_NAME",
+ "value": "${TEIID_PORT_NAME}"
+ }, {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ }, {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ }],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
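
Unlike the other connection parameters in these templates, TEIID_USERNAME and TEIID_PASSWORD are marked required but carry no default, so processing fails unless the caller supplies them. A sketch of that validation rule (the inline example dict is an illustrative subset of this template's parameters):

    def missing_required_params(template, supplied):
        # A required parameter passes if the caller supplies it, or if
        # the template gives it a default value or a generate rule.
        missing = []
        for p in template.get("parameters", []):
            if (p.get("required") and p["name"] not in supplied
                    and "value" not in p and "generate" not in p):
                missing.append(p["name"])
        return missing

    tmpl = {"parameters": [
        {"name": "TEIID_USERNAME", "required": True},
        {"name": "TEIID_PASSWORD", "required": True},
        {"name": "APP_NAME", "required": True,
         "value": "s2i-spring-boot-camel-teiid"},
    ]}
    print(missing_required_params(tmpl, {}))
    # ['TEIID_USERNAME', 'TEIID_PASSWORD']
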
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-template.json
new file mode 100644
index 000000000..c78a96f7c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and Camel QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a Camel route that triggeres a message every 5th second, and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
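
The one environment variable common to every template here is KUBERNETES_NAMESPACE, injected through the downward API (a fieldRef on metadata.namespace). Inside the pod the same information is also available from the service-account mount, suggesting a small resolution helper (a sketch; the fallback path is the standard kubelet mount, not something these templates configure):

    import os

    def current_namespace():
        # First the env var these templates inject via the downward API...
        ns = os.environ.get("KUBERNETES_NAMESPACE")
        if ns:
            return ns
        # ...then the standard service-account mount every pod receives.
        path = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
        with open(path) as f:
            return f.read().strip()
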
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-xml-template.json
new file mode 100644
index 000000000..620425902
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-xml-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and Camel Xml QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a Camel route (in Spring xml) that triggeres a message every 5th second, and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-xml"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-xml"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-xml",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-xml.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-xml-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
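
All of the Spring Boot templates above wire both probes to GET /health on port 8081, with readiness first checked 10 seconds after start and liveness only after 180 seconds, giving the JVM time to boot; Kubernetes treats any response status from 200 to 399 as success. A minimal stand-in handler that would satisfy these probes (a sketch; the real endpoint comes from the application itself, presumably Spring Boot's health endpoint):

    from http.server import BaseHTTPRequestHandler, HTTPServer

    class Health(BaseHTTPRequestHandler):
        def do_GET(self):
            if self.path == "/health":
                body = b'{"status":"UP"}'
                self.send_response(200)  # any 200-399 passes the probe
                self.send_header("Content-Type", "application/json")
                self.send_header("Content-Length", str(len(body)))
                self.end_headers()
                self.wfile.write(body)
            else:
                self.send_response(404)
                self.end_headers()

    if __name__ == "__main__":
        HTTPServer(("", 8081), Health).serve_forever()
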
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxrs-template.json
new file mode 100644
index 000000000..15cfc93fd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxrs-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXRS QuickStart. This example demonstrates how you can use Apache CXF JAXRS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXRS endpoint with Swagger enabled.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxrs",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxrs.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
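
The GitHub and Generic build triggers embed BUILD_SECRET in the webhook URL exposed by the BuildConfig. Assuming the OpenShift 3.x URL layout (the master host and namespace below are placeholders, not anything this commit defines), the GitHub hook for this template would be assembled like this:

    def github_webhook_url(master, namespace, name, secret):
        # OpenShift 3.x-era webhook path for a BuildConfig; the caller
        # passes the BUILD_SECRET generated when the template was processed.
        return ("https://{m}/oapi/v1/namespaces/{ns}"
                "/buildconfigs/{bc}/webhooks/{s}/github").format(
                    m=master, ns=namespace, bc=name, s=secret)

    # Placeholder host/namespace; <BUILD_SECRET> stands for the real value.
    print(github_webhook_url("master.example.com", "myproject",
                             "s2i-spring-boot-cxf-jaxrs", "<BUILD_SECRET>"))
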
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxws-template.json
new file mode 100644
index 000000000..c70ee7726
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxws-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXWS QuickStart. This example demonstrates how you can use Apache CXF JAXWS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXWS endpoint.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxws"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxws"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxws",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxws.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
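
CPU_REQUEST and CPU_LIMIT take plain Kubernetes CPU quantities, so the defaults 0.2 and 1.0 mean 200 and 1000 millicores respectively. A small conversion sketch:

    def cpu_to_millicores(q):
        # Kubernetes CPU quantities: '500m' is millicores already,
        # a bare number like '0.2' is a fraction of a core.
        if q.endswith("m"):
            return int(q[:-1])
        return int(float(q) * 1000)

    assert cpu_to_millicores("0.2") == 200    # CPU_REQUEST default
    assert cpu_to_millicores("1.0") == 1000   # CPU_LIMIT default
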
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-amq-template.json
new file mode 100644
index 000000000..cd0bec3c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-amq-template.json
@@ -0,0 +1,362 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel route using ActiveMQ in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-amq.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-amq-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-amq",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_USERNAME",
+ "value": "${ACTIVEMQ_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_PASSWORD",
+ "value": "${ACTIVEMQ_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
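
The Karaf templates add MEMORY_REQUEST (1.5G) and MEMORY_LIMIT (2G) alongside the CPU parameters. These are decimal quantities: the G suffix means 10^9 bytes, while the binary Gi suffix would mean 2^30. A conversion sketch covering both families:

    SUFFIXES = {
        "K": 10**3, "M": 10**6, "G": 10**9, "T": 10**12,
        "Ki": 2**10, "Mi": 2**20, "Gi": 2**30, "Ti": 2**40,
    }

    def memory_to_bytes(q):
        # Try two-letter binary suffixes before one-letter decimal ones.
        for suffix in sorted(SUFFIXES, key=len, reverse=True):
            if q.endswith(suffix):
                return int(float(q[:-len(suffix)]) * SUFFIXES[suffix])
        return int(q)  # bare byte count

    assert memory_to_bytes("1.5G") == 1_500_000_000  # MEMORY_REQUEST default
    assert memory_to_bytes("2G") == 2_000_000_000    # MEMORY_LIMIT default
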
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-log-template.json
new file mode 100644
index 000000000..2ecce08a9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-log-template.json
@@ -0,0 +1,336 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "A simple Camel route in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-log"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-log"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-log",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-log.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-log-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-log",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
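Note: a template like the one above is instantiated with oc process. A minimal sketch, assuming the template JSON has been saved locally; the file name and the parameter overrides are illustrative, not part of the template:

    # Instantiate the template from a local file, overriding two parameters,
    # and create the resulting objects in the current project.
    oc process -f karaf2-camel-template.json \
        -p APP_NAME=my-karaf-app \
        -p MEMORY_LIMIT=2G \
        | oc create -f -

Parameters left unset keep the defaults declared in the template; required parameters without a default must be supplied with -p.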
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-rest-sql-template.json
new file mode 100644
index 000000000..d80939efb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-rest-sql-template.json
@@ -0,0 +1,428 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel example using Rest DSL with SQL Database in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-rest-sql.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-rest-sql-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "required": true,
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
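Note: this template expects the MySQL service named by MYSQL_SERVICE_NAME (default "mysql") to exist before the quickstart starts. A sketch of the two-step flow, assuming the stock mysql-ephemeral template is available in the cluster; the credential values are illustrative:

    # 1. Create the database the quickstart will connect to.
    oc new-app mysql-ephemeral \
        -p MYSQL_USER=sampleuser -p MYSQL_PASSWORD=samplepw -p MYSQL_DATABASE=sampledb
    # 2. Instantiate the quickstart with matching credentials.
    oc process -f karaf2-camel-rest-sql-template.json \
        -p MYSQL_SERVICE_USERNAME=sampleuser -p MYSQL_SERVICE_PASSWORD=samplepw \
        | oc create -f -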
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-cxf-rest-template.json
new file mode 100644
index 000000000..f99099868
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-cxf-rest-template.json
@@ -0,0 +1,392 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "REST example using CXF in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-cxf-rest"
+ },
+ "labels": {
+ "template": "s2i-karaf2-cxf-rest"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-cxf-rest.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-cxf-rest-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "java",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9412,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
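Note: both Karaf templates point their readiness and liveness probes at HTTP endpoints on port 8181 (/readiness-check and /health-check). A sketch for inspecting those endpoints on a running pod; the selector assumes the default APP_NAME, and the rest is illustrative:

    # Find one pod of the quickstart and forward its Karaf HTTP port locally.
    POD=$(oc get pods -l deploymentconfig=s2i-karaf2-cxf-rest \
        -o jsonpath='{.items[0].metadata.name}')
    oc port-forward "$POD" 8181:8181 &
    # Hit the same endpoints the kubelet probes.
    curl -s http://localhost:8181/readiness-check
    curl -s http://localhost:8181/health-check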
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-amq-template.json
new file mode 100644
index 000000000..8b3cd6ed0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-amq-template.json
@@ -0,0 +1,331 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes using OpenShift. In this example we will use two containers, one container to run as a ActiveMQ broker, and another as a client to the broker, where the Camel routes are running. This quickstart requires the ActiveMQ broker has been deployed and running first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-amq.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-amq-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "value": "${ACTIVEMQ_BROKER_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "value": "${ACTIVEMQ_BROKER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
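Note: the quickstart assumes an ActiveMQ broker service (default "broker-amq-tcp") is already running. A sketch that deploys a broker first; the amq62-basic template name and the credentials are assumptions about what is installed in the cluster:

    # 1. Stand up a broker; with the default application name "broker",
    #    amq62-basic creates a service called broker-amq-tcp.
    oc new-app amq62-basic -p MQ_USERNAME=admin -p MQ_PASSWORD=admin
    # 2. Instantiate the quickstart with matching broker credentials.
    oc process -f spring-boot-camel-amq-template.json \
        -p ACTIVEMQ_BROKER_USERNAME=admin -p ACTIVEMQ_BROKER_PASSWORD=admin \
        | oc create -f -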
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-config-template.json
new file mode 100644
index 000000000..bc5bbad22
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-config-template.json
@@ -0,0 +1,327 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring-Boot application using Openshift ConfigMaps and Secrets.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-config"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-config"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-config",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-config.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-config-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_ACCOUNT_NAME",
+ "displayName": "Service Account",
+ "value": "qs-camel-config",
+ "required": true,
+ "description": "The Service Account that will be used to run the container. It must be already present in Openshift and have the view role."
+ },
+ {
+ "name": "SECRET_NAME",
+ "displayName": "Secret Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift Secret that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "CONFIGMAP_NAME",
+ "displayName": "ConfigMap Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift ConfigMap that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "volumes": [
+ {
+ "name": "camel-config",
+ "secret": {
+ "secretName": "${SECRET_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_SECRETS_NAME",
+ "value": "${SECRET_NAME}"
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_CONFIG_NAME",
+ "value": "${CONFIGMAP_NAME}"
+ } ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "camel-config",
+ "readOnly": true,
+ "mountPath": "/etc/secrets/camel-config"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
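Note: unlike the other quickstarts, this template does not create everything it needs; the service account, secret and ConfigMap named by its parameters must exist beforehand. A sketch of those prerequisites using the template defaults; the key/value payloads are illustrative:

    # Service account with the view role, so the pod can read config from the API.
    oc create serviceaccount qs-camel-config
    oc policy add-role-to-user view -z qs-camel-config
    # Secret and ConfigMap the Spring Boot application is configured from.
    oc create secret generic camel-config \
        --from-literal=application.properties=quickstart.message=secret-hello
    oc create configmap camel-config \
        --from-literal=application.properties=quickstart.message=config-hello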
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-drools-template.json
new file mode 100644
index 000000000..e54fa0d59
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-drools-template.json
@@ -0,0 +1,334 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create knowledge session via Spring configuration file. Camel routes, defined via Spring as well, are then used to e.g. pass (insert) the Body of the message as a POJO to Drools engine for execution.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-drools"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-drools"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-drools",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-drools.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-drools-1.0.0.redhat-000054",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "KIESERVER_SERVICE",
+ "displayName": "Decision Server Name",
+ "required": true,
+ "value": "kie-app",
+ "description": "Set this to the name of the Decision Server. You may need to create an instance before."
+ },
+ {
+ "name": "KIESERVER_USERNAME",
+ "displayName": "Decision Server Username",
+ "required": true,
+ "value": "kieserver",
+ "description": "The username used to authenticate with the Decision Server."
+ },
+ {
+ "name": "KIESERVER_PASSWORD",
+ "displayName": "Decision Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the Decision Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000054",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "KIESERVER_SERVICE",
+ "value": "${KIESERVER_SERVICE}"
+ }, {
+ "name": "KIESERVER_USERNAME",
+ "value": "${KIESERVER_USERNAME}"
+ }, {
+ "name": "KIESERVER_PASSWORD",
+ "value": "${KIESERVER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
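Note: KIESERVER_PASSWORD is required and has no default, so instantiation needs at least one override. A sketch using a parameter file to keep the credential out of shell history; the file name and values are illustrative, and --param-file assumes a reasonably recent oc client:

    # One KEY=value per line, consumed by oc process.
    cat > drools.params <<'EOF'
    KIESERVER_SERVICE=kie-app
    KIESERVER_USERNAME=kieserver
    KIESERVER_PASSWORD=changeme
    EOF
    oc process -f spring-boot-camel-drools-template.json \
        --param-file=drools.params | oc create -f -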
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-infinispan-template.json
new file mode 100644
index 000000000..20ba97dac
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-infinispan-template.json
@@ -0,0 +1,315 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-infinispan"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-infinispan"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-infinispan",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-infinispan.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-infinispan-1.0.0.redhat-000024",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "INFINISPAN_SERVICE",
+ "displayName": "JBoss Data Grid Service (Hot Rod)",
+ "required": true,
+ "value": "datagrid-app-hotrod",
+ "description": "Set this to the name of the Hot Rod service of the JBoss Data Grid. You may need to create the data grid first."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000024",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "INFINISPAN_SERVICE",
+ "value": "${INFINISPAN_SERVICE}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
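Note: the quickstart connects over Hot Rod to the service named by INFINISPAN_SERVICE (default "datagrid-app-hotrod"). A sketch that deploys a data grid first; the datagrid65-basic template name, and the fact that it derives a "-hotrod" service from APPLICATION_NAME, are assumptions about the installed xPaaS templates:

    # 1. Deploy the data grid; this should yield a datagrid-app-hotrod service.
    oc new-app datagrid65-basic -p APPLICATION_NAME=datagrid-app
    # 2. Instantiate the quickstart with its defaults.
    oc process -f spring-boot-camel-infinispan-template.json | oc create -f -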
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-rest-sql-template.json
new file mode 100644
index 000000000..555647fab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-rest-sql-template.json
@@ -0,0 +1,403 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. In this example we will use two containers, one container to run as a MySQL server, and another as a client to the database, where the Camel routes are running. This quickstart requires the MySQL server to be deployed and started first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-rest-sql.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "camel-rest-sql",
+ "description": "Exposed service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8080,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
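Note: the readiness and liveness probes in the template above both poll the Spring Boot actuator endpoint /health on port 8081, with a short delay for readiness and a long one for liveness so a slow JVM startup is not killed prematurely. A minimal way to exercise the same endpoint by hand (a sketch, assuming curl is available in the image and <pod> is a running pod name):

    oc exec <pod> -- curl -s http://localhost:8081/health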
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-teiid-template.json
new file mode 100644
index 000000000..cf9a4e903
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-teiid-template.json
@@ -0,0 +1,343 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-teiid"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-teiid"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-teiid",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-teiid.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-teiid-1.0.0.redhat-000053",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "TEIID_SERVICE_NAME",
+ "displayName": "JDV Server Name",
+ "required": true,
+ "value": "datavirt-app",
+ "description": "Set this to the name of the JDV Server. You may need to create an instance before."
+ },
+ {
+ "name": "TEIID_PORT_NAME",
+ "displayName": "JDV Port Name",
+ "value": "jdbc",
+ "description": "Set this to the name of the JDV port to use. Set this value if the JDV service contains multiple named ports."
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "displayName": "JDV Server Username",
+ "required": true,
+ "description": "The username used to authenticate with the JDV Server."
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "displayName": "JDV Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the JDV Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000053",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [
+ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "TEIID_SERVICE_NAME",
+ "value": "${TEIID_SERVICE_NAME}"
+ }, {
+ "name": "TEIID_PORT_NAME",
+ "value": "${TEIID_PORT_NAME}"
+ }, {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ }, {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ }],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
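Note: TEIID_USERNAME and TEIID_PASSWORD are required and carry no defaults, so they must be supplied at instantiation time. A minimal sketch, assuming a logged-in oc client and placeholder credentials:

    oc new-app -f spring-boot-camel-teiid-template.json \
        -p TEIID_USERNAME=viewer -p TEIID_PASSWORD=changeit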
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-template.json
new file mode 100644
index 000000000..c78a96f7c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and Camel QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a Camel route that triggeres a message every 5th second, and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
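Note: every required parameter in this template has a usable default, so it can be instantiated as-is; a sketch against the local template file:

    oc new-app -f spring-boot-camel-template.json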
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-xml-template.json
new file mode 100644
index 000000000..620425902
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-xml-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and Camel Xml QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a Camel route (in Spring xml) that triggeres a message every 5th second, and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-xml"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-xml"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-xml",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-xml.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-xml-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
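Note: GIT_REPO and GIT_REF pin the build to a released quickstart tag; overriding both points the S2I build at a fork instead. A sketch with a hypothetical fork URL and branch:

    oc new-app -f spring-boot-camel-xml-template.json \
        -p GIT_REPO=https://github.com/example/spring-boot-camel-xml.git \
        -p GIT_REF=master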
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxrs-template.json
new file mode 100644
index 000000000..15cfc93fd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxrs-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXRS QuickStart. This example demonstrates how you can use Apache CXF JAXRS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXRS endpoint with Swagger enabled.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxrs",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxrs.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
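Note: the GitHub and Generic triggers share the generated BUILD_SECRET, which becomes part of the webhook URL. A sketch of firing the generic webhook, assuming the OpenShift 3.x URL convention and placeholder master host, project, and secret:

    curl -k -X POST \
      https://master.example.com:8443/oapi/v1/namespaces/myproject/buildconfigs/s2i-spring-boot-cxf-jaxrs/webhooks/<secret>/generic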
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxws-template.json
new file mode 100644
index 000000000..c70ee7726
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxws-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXWS QuickStart. This example demonstrates how you can use Apache CXF JAXWS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXWS endpoint.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxws"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxws"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxws",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxws.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
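Note: besides the webhook, ConfigChange, and ImageChange triggers, a build can be started by hand, which is often the quickest way to verify the S2I setup; a sketch assuming the default APP_NAME:

    oc start-build s2i-spring-boot-cxf-jaxws --follow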
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-amq-template.json
new file mode 100644
index 000000000..cd0bec3c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-amq-template.json
@@ -0,0 +1,362 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel route using ActiveMQ in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-amq.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-amq-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-amq",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_USERNAME",
+ "value": "${ACTIVEMQ_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_PASSWORD",
+ "value": "${ACTIVEMQ_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
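Note: the template assumes an ActiveMQ broker service already exists (ACTIVEMQ_SERVICE_NAME defaults to broker-amq-tcp). A sketch that wires the route to that broker with placeholder credentials:

    oc new-app -f karaf2-camel-amq-template.json \
        -p ACTIVEMQ_USERNAME=admin -p ACTIVEMQ_PASSWORD=secret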
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-log-template.json
new file mode 100644
index 000000000..2ecce08a9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-log-template.json
@@ -0,0 +1,336 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "A simple Camel route in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-log"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-log"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-log",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-log.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-log-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-log",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
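Note: after instantiation the DeploymentConfig rolls out a single replica whose health is gated by the Karaf probe endpoints on port 8181. A sketch for following the rollout and the route's log output, assuming the default APP_NAME:

    oc rollout status dc/s2i-karaf2-camel-log
    oc logs -f dc/s2i-karaf2-camel-log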
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-rest-sql-template.json
new file mode 100644
index 000000000..d80939efb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-rest-sql-template.json
@@ -0,0 +1,428 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel example using Rest DSL with SQL Database in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-rest-sql.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-rest-sql-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "required": true,
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+    },
+    {
+      "name": "BUILD_SECRET",
+      "displayName": "Git Build Secret",
+      "generate": "expression",
+      "description": "The secret needed to trigger a build.",
+      "from": "[a-zA-Z0-9]{40}"
+    },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
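The file above is a complete OpenShift v1 Template, so it can be instantiated straight from disk without first loading it into the cluster. A minimal sketch with illustrative parameter values and a placeholder file name (older oc clients spell the parameter flag -v/--value rather than -p):

    # Render the template locally, overriding a few parameters, and
    # create the resulting objects in the current project.
    oc process -f <template-file>.json \
        -p MYSQL_SERVICE_USERNAME=dbuser \
        -p MYSQL_SERVICE_PASSWORD=dbpass \
      | oc create -f -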
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-cxf-rest-template.json
new file mode 100644
index 000000000..f99099868
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-cxf-rest-template.json
@@ -0,0 +1,392 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "REST example using CXF in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-cxf-rest"
+ },
+ "labels": {
+ "template": "s2i-karaf2-cxf-rest"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-cxf-rest.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-cxf-rest-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "java",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9412,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
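Once registered in the cluster, the template can be instantiated by name, and builds can be started by hand instead of through the webhook triggers defined above. A sketch, assuming the commands run in the project where the application should live:

    oc create -f karaf2-cxf-rest-template.json
    oc new-app --template=s2i-karaf2-cxf-rest
    # Trigger a build manually rather than waiting for a webhook push:
    oc start-build s2i-karaf2-cxf-rest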
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-amq-template.json
new file mode 100644
index 000000000..8b3cd6ed0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-amq-template.json
@@ -0,0 +1,331 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes using OpenShift. In this example we will use two containers, one container to run as a ActiveMQ broker, and another as a client to the broker, where the Camel routes are running. This quickstart requires the ActiveMQ broker has been deployed and running first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-amq.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-amq-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "value": "${ACTIVEMQ_BROKER_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "value": "${ACTIVEMQ_BROKER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
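As the description notes, the broker must exist before this quickstart is deployed. A sketch, assuming the A-MQ xPaaS template amq62-basic is available (that template name and its MQ_USERNAME/MQ_PASSWORD parameters are assumptions; any broker exposing a TCP service named broker-amq-tcp would do):

    # Deploy a broker first, then point the quickstart at its TCP service.
    oc new-app --template=amq62-basic \
        -p MQ_USERNAME=amquser -p MQ_PASSWORD=amqpass
    oc new-app --template=s2i-spring-boot-camel-amq \
        -p ACTIVEMQ_SERVICE_NAME=broker-amq-tcp \
        -p ACTIVEMQ_BROKER_USERNAME=amquser \
        -p ACTIVEMQ_BROKER_PASSWORD=amqpass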
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-config-template.json
new file mode 100644
index 000000000..bc5bbad22
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-config-template.json
@@ -0,0 +1,327 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring-Boot application using Openshift ConfigMaps and Secrets.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-config"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-config"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-config",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-config.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-config-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_ACCOUNT_NAME",
+ "displayName": "Service Account",
+ "value": "qs-camel-config",
+ "required": true,
+ "description": "The Service Account that will be used to run the container. It must be already present in Openshift and have the view role."
+ },
+ {
+ "name": "SECRET_NAME",
+ "displayName": "Secret Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift Secret that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "CONFIGMAP_NAME",
+ "displayName": "ConfigMap Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift ConfigMap that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "volumes": [
+ {
+ "name": "camel-config",
+ "secret": {
+ "secretName": "${SECRET_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_SECRETS_NAME",
+ "value": "${SECRET_NAME}"
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_CONFIG_NAME",
+ "value": "${CONFIGMAP_NAME}"
+ } ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "camel-config",
+ "readOnly": true,
+ "mountPath": "/etc/secrets/camel-config"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
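This template consumes, but does not create, the service account, Secret, and ConfigMap; all three must exist before deployment. A sketch of preparing them under the default names used above (the --from-literal keys are illustrative, not the keys the quickstart actually reads):

    oc create serviceaccount qs-camel-config
    oc policy add-role-to-user view -z qs-camel-config
    oc create secret generic camel-config --from-literal=example.key=secret-value
    oc create configmap camel-config --from-literal=example.key=config-value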
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-drools-template.json
new file mode 100644
index 000000000..e54fa0d59
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-drools-template.json
@@ -0,0 +1,334 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create knowledge session via Spring configuration file. Camel routes, defined via Spring as well, are then used to e.g. pass (insert) the Body of the message as a POJO to Drools engine for execution.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-drools"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-drools"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-drools",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-drools.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-drools-1.0.0.redhat-000054",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "KIESERVER_SERVICE",
+ "displayName": "Decision Server Name",
+ "required": true,
+ "value": "kie-app",
+ "description": "Set this to the name of the Decision Server. You may need to create an instance before."
+ },
+ {
+ "name": "KIESERVER_USERNAME",
+ "displayName": "Decision Server Username",
+ "required": true,
+ "value": "kieserver",
+ "description": "The username used to authenticate with the Decision Server."
+ },
+ {
+ "name": "KIESERVER_PASSWORD",
+ "displayName": "Decision Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the Decision Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000054",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "KIESERVER_SERVICE",
+ "value": "${KIESERVER_SERVICE}"
+ }, {
+ "name": "KIESERVER_USERNAME",
+ "value": "${KIESERVER_USERNAME}"
+ }, {
+ "name": "KIESERVER_PASSWORD",
+ "value": "${KIESERVER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
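KIESERVER_PASSWORD is required but has no default, so instantiation fails until it is supplied. A sketch, assuming a Decision Server service named kie-app is already running (the credentials are illustrative):

    oc new-app --template=s2i-spring-boot-camel-drools \
        -p KIESERVER_SERVICE=kie-app \
        -p KIESERVER_USERNAME=kieserver \
        -p KIESERVER_PASSWORD=changeme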
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-infinispan-template.json
new file mode 100644
index 000000000..20ba97dac
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-infinispan-template.json
@@ -0,0 +1,315 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-infinispan"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-infinispan"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-infinispan",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-infinispan.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-infinispan-1.0.0.redhat-000024",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "INFINISPAN_SERVICE",
+ "displayName": "JBoss Data Grid Service (Hot Rod)",
+ "required": true,
+ "value": "datagrid-app-hotrod",
+ "description": "Set this to the name of the Hot Rod service of the JBoss Data Grid. You may need to create the data grid first."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000024",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "INFINISPAN_SERVICE",
+ "value": "${INFINISPAN_SERVICE}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
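A sketch of instantiating the template against an existing data grid; the Hot Rod service name below is simply the template's default, so adjust it to match the grid actually deployed:

    oc new-app --template=s2i-spring-boot-camel-infinispan \
        -p INFINISPAN_SERVICE=datagrid-app-hotrod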
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-rest-sql-template.json
new file mode 100644
index 000000000..555647fab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-rest-sql-template.json
@@ -0,0 +1,403 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. In this example we will use two containers, one container to run as a MySQL server, and another as a client to the database, where the Camel routes are running. This quickstart requires the MySQL server to be deployed and started first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-rest-sql.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "camel-rest-sql",
+ "description": "Exposed service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8080,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
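Because this template also creates a Route, the Camel REST API is reachable from outside the cluster once the pod reports ready. A sketch (the /camel-rest-sql/books path is an assumption about the quickstart's REST DSL; substitute whatever path the application actually exposes):

    # Resolve the route's hostname, then call the REST endpoint.
    HOST=$(oc get route camel-rest-sql-route -o jsonpath='{.spec.host}')
    curl "http://${HOST}/camel-rest-sql/books"    # path is an assumption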
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-teiid-template.json
new file mode 100644
index 000000000..cf9a4e903
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-teiid-template.json
@@ -0,0 +1,343 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-teiid"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-teiid"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-teiid",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-teiid.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-teiid-1.0.0.redhat-000053",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "TEIID_SERVICE_NAME",
+ "displayName": "JDV Server Name",
+ "required": true,
+ "value": "datavirt-app",
+ "description": "Set this to the name of the JDV Server. You may need to create an instance before."
+ },
+ {
+ "name": "TEIID_PORT_NAME",
+ "displayName": "JDV Port Name",
+ "value": "jdbc",
+ "description": "Set this to the name of the JDV port to use. Set this value if the JDV service contains multiple named ports."
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "displayName": "JDV Server Username",
+ "required": true,
+ "description": "The username used to authenticate with the JDV Server."
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "displayName": "JDV Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the JDV Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000053",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [
+ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "TEIID_SERVICE_NAME",
+ "value": "${TEIID_SERVICE_NAME}"
+ }, {
+ "name": "TEIID_PORT_NAME",
+ "value": "${TEIID_PORT_NAME}"
+ }, {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ }, {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ }],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
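For context on how these quickstart templates are consumed: `oc process` substitutes the `${...}` parameters and emits an object list, which is then piped to `oc create`. A minimal sketch follows; the file path and the TEIID credential values are placeholders (the template gives `TEIID_USERNAME` and `TEIID_PASSWORD` no defaults, so they must be supplied at process time):

```yaml
# Sketch: instantiate the JDV quickstart template shown above.
# The path and the -p values are placeholders, not repository defaults.
- name: Instantiate the JDV quickstart template
  hosts: masters[0]
  tasks:
    - name: Process the template and create the resulting objects
      shell: >
        oc process -f /tmp/datavirt-quickstart-template.json
        -p TEIID_USERNAME=quickstart
        -p TEIID_PASSWORD=changeme
        | oc create -f -
```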
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-template.json
new file mode 100644
index 000000000..c78a96f7c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring-Boot and Camel QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on OpenShift. The quickstart uses Spring Boot to configure a small application that includes a Camel route that triggers a message every five seconds and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
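Registering one of these templates so it shows up in the web-console catalog amounts to creating it in the shared `openshift` namespace. A single-file sketch of what the examples import boils down to (an illustration, not the role's actual task; the `failed_when` guard keeps re-runs from failing on an existing template):

```yaml
# Sketch: register one quickstart template into the catalog namespace.
- name: Import the spring-boot-camel quickstart template
  command: >
    oc create -n openshift
    -f roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-template.json
  register: import_result
  failed_when:
    - import_result.rc != 0
    - "'already exists' not in import_result.stderr"
  changed_when: "'created' in import_result.stdout"
```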
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-xml-template.json
new file mode 100644
index 000000000..620425902
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-xml-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring-Boot and Camel XML QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on OpenShift. The quickstart uses Spring Boot to configure a small application that includes a Camel route (in Spring XML) that triggers a message every five seconds and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-xml"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-xml"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-xml",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-xml.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-xml-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxrs-template.json
new file mode 100644
index 000000000..15cfc93fd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxrs-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring-Boot and CXF JAXRS QuickStart. This example demonstrates how you can use Apache CXF JAXRS with Spring Boot on OpenShift. The quickstart uses Spring Boot to configure a small application that includes a CXF JAXRS endpoint with Swagger enabled.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxrs",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxrs.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxws-template.json
new file mode 100644
index 000000000..c70ee7726
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxws-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring-Boot and CXF JAXWS QuickStart. This example demonstrates how you can use Apache CXF JAXWS with Spring Boot on OpenShift. The quickstart uses Spring Boot to configure a small application that includes a CXF JAXWS endpoint.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxws"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxws"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxws",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxws.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
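Each of these templates wires the generated `BUILD_SECRET` into its GitHub and Generic webhook triggers. As a sketch of firing the generic hook by hand, using the standard OpenShift 3.x webhook path (the host, project, and secret below are all placeholders):

```yaml
# Sketch: trigger a build through the BuildConfig's generic webhook.
# Every value below is a placeholder for a real cluster and project.
- name: Kick off a build via the generic webhook
  uri:
    url: "https://openshift.example.com:8443/oapi/v1/namespaces/myproject/buildconfigs/s2i-spring-boot-cxf-jaxws/webhooks/REPLACE_WITH_BUILD_SECRET/generic"
    method: POST
    validate_certs: no
    status_code: 200
```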
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index 6c90b4e96..e76a15952 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -15,8 +15,11 @@ Facts
| Name | Default Value | Description |
-----------------------------|---------------|----------------------------------------|
-| docker_excluder_enabled | none | Records the status of docker excluder |
-| openshift_excluder_enabled | none | Records the status of the openshift excluder |
+| enable_docker_excluder | enable_excluders | Enable the docker excluder. If not set, the docker excluder is ignored. |
+| enable_openshift_excluder | enable_excluders | Enable the openshift excluder. If not set, the openshift excluder is ignored. |
+| enable_excluders | None | Enable all excluders |
+| enable_docker_excluder_override | None | Indicates the docker excluder must be enabled |
+| disable_openshift_excluder_override | None | Indicates the openshift excluder must be disabled |
Role Variables
--------------
@@ -25,6 +28,16 @@ None
Dependencies
------------
+Tasks to include
+----------------
+
+- exclude: enable excluders (assuming the excluders are installed)
+- unexclude: disable excluders (assuming the excluders are installed)
+- install: install excluders (installation is followed by enabling the excluders)
+- enable: enable excluders (optionally with an installation step)
+- disable: disable excluders (optionally with an installation step and a status check that can override which excluder gets enabled or disabled)
+- status: determine the status of the excluders
+
Example Playbook
----------------
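Under the Example Playbook heading above, a plausible sketch would drive the task files listed earlier via `include_role` with `tasks_from` (assuming Ansible 2.2+):

```yaml
# Sketch: bracket an install or upgrade with the excluder task files.
- hosts: masters:nodes
  tasks:
    - name: Disable excluders for the duration of the run
      include_role:
        name: openshift_excluder
        tasks_from: disable

    # ... install/upgrade work goes here ...

    - name: Re-enable excluders afterwards
      include_role:
        name: openshift_excluder
        tasks_from: enable
```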
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
new file mode 100644
index 000000000..7c3ae2a86
--- /dev/null
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# keep the currently installed package ('present') or update to 'latest' if available
+openshift_excluder_package_state: present
+docker_excluder_package_state: present
+
+enable_excluders: true
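These defaults combine with the facts documented in the README table; a sketch of an inventory override using only variable names defined by this role (the values are illustrative):

```yaml
# Sketch: group_vars override — excluders off globally, but keep the
# docker excluder pinned on and let excluder packages float to latest.
enable_excluders: false
enable_docker_excluder: true
docker_excluder_package_state: latest
openshift_excluder_package_state: present
```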
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 8bca38e77..4d1c1efca 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- { role: openshift_facts }
+- { role: openshift_repos }
diff --git a/roles/openshift_excluder/tasks/adjust.yml b/roles/openshift_excluder/tasks/adjust.yml
new file mode 100644
index 000000000..cbdd7785b
--- /dev/null
+++ b/roles/openshift_excluder/tasks/adjust.yml
@@ -0,0 +1,25 @@
+---
+# Depending on the enablement of the individual excluders and their status,
+# some excluders need to be disabled and others enabled.
+# By default, all excluders are disabled unless overridden.
+- block:
+ - include: init.yml
+ # All excluders that are to be enabled are enabled
+ - include: exclude.yml
+ vars:
+      # Enable the docker excluder only if it is overridden
+ # BZ #1430612: docker excluders should be enabled even during installation and upgrade
+ exclude_docker_excluder: "{{ enable_docker_excluder | default(true) | bool }}"
+ # excluder is to be disabled by default
+ exclude_openshift_excluder: false
+ # All excluders that are to be disabled are disabled
+ - include: unexclude.yml
+ vars:
+ # If the docker override is not set, default to the generic behaviour
+ # BZ #1430612: docker excluders should be enabled even during installation and upgrade
+ unexclude_docker_excluder: false
+      # disabling the openshift excluder is never overridden back to enabled;
+      # disable it if the docker excluder is enabled
+ unexclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
new file mode 100644
index 000000000..2245c7b21
--- /dev/null
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -0,0 +1,22 @@
+---
+# input variables
+# - with_status_check
+# - with_install
+# - excluder_package_state
+# - docker_excluder_package_state
+- include: init.yml
+
+# Install any excluder that is enabled
+- include: install.yml
+ vars:
+ # Both docker_excluder_on and openshift_excluder_on are set in openshift_excluder->init task
+ install_docker_excluder: "{{ docker_excluder_on | bool }}"
+ install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when: docker_excluder_on or openshift_excluder_on
+
+  # if the docker excluder is not enabled, we don't care about its status;
+  # if the docker excluder is enabled, we install it, and if its status is non-zero
+  # it is enabled no matter what
+
+ # And finally adjust an excluder in order to update host components correctly
+- include: adjust.yml
diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml
new file mode 100644
index 000000000..9122c9aeb
--- /dev/null
+++ b/roles/openshift_excluder/tasks/enable.yml
@@ -0,0 +1,21 @@
+---
+# input variables:
+# - with_install
+- block:
+ - include: init.yml
+
+ - include: install.yml
+ vars:
+ install_docker_excluder: "{{ docker_excluder_on | bool }}"
+ install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when: with_install | default(docker_excluder_on or openshift_excluder_on) | bool
+
+ - include: exclude.yml
+ vars:
+      # Enable the docker excluder only if it is overridden, or else enabled by default (in that order)
+ exclude_docker_excluder: "{{ enable_docker_excluder_override | default(docker_excluder_on) | bool }}"
+      # Enable the openshift excluder only if it is not overridden, or else enabled by default (in that order)
+ exclude_openshift_excluder: "{{ not disable_openshift_excluder_override | default(not openshift_excluder_on) | bool }}"
+
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/exclude.yml b/roles/openshift_excluder/tasks/exclude.yml
index 570183aef..d31351aea 100644
--- a/roles/openshift_excluder/tasks/exclude.yml
+++ b/roles/openshift_excluder/tasks/exclude.yml
@@ -1,11 +1,20 @@
---
-- include: install.yml
- when: not openshift.common.is_containerized | bool
+# input variables:
+# - exclude_docker_excluder
+# - exclude_openshift_excluder
+- block:
+ - name: Enable docker excluder
+ command: "{{ openshift.common.service_type }}-docker-excluder exclude"
+ # if the docker override is set, it means the docker excluder needs to be enabled no matter what
+ # if the docker override is not set, the excluder is set based on enable_docker_excluder
+ when:
+ - exclude_docker_excluder | default(false) | bool
-- name: Enable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder exclude"
- when: not openshift.common.is_containerized | bool
-
-- name: Enable excluder
- command: "{{ openshift.common.service_type }}-excluder exclude"
- when: not openshift.common.is_containerized | bool
+ - name: Enable openshift excluder
+ command: "{{ openshift.common.service_type }}-excluder exclude"
+ # if the openshift override is set, it means the openshift excluder is disabled no matter what
+ # if the openshift override is not set, the excluder is set based on enable_openshift_excluder
+ when:
+ - exclude_openshift_excluder | default(false) | bool
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/init.yml b/roles/openshift_excluder/tasks/init.yml
new file mode 100644
index 000000000..1ea18f363
--- /dev/null
+++ b/roles/openshift_excluder/tasks/init.yml
@@ -0,0 +1,12 @@
+---
+- name: Evaluate if docker excluder is to be enabled
+ set_fact:
+ docker_excluder_on: "{{ enable_docker_excluder | default(enable_excluders) | bool }}"
+
+- debug: var=docker_excluder_on
+
+- name: Evaluate if openshift excluder is to be enabled
+ set_fact:
+ openshift_excluder_on: "{{ enable_openshift_excluder | default(enable_excluders) | bool }}"
+
+- debug: var=openshift_excluder_on
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index ee4cb2c05..dcc8df0cb 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,16 +1,21 @@
---
-- name: Install latest excluder
- package:
- name: "{{ openshift.common.service_type }}-excluder"
- state: latest
- when:
- - openshift_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
+# input variables
+# - install_docker_excluder
+# - install_openshift_excluder
+- block:
+
+ - name: Install docker excluder
+ package:
+ name: "{{ openshift.common.service_type }}-docker-excluder"
+ state: "{{ docker_excluder_package_state }}"
+ when:
+ - install_docker_excluder | default(true) | bool
-- name: Install latest docker excluder
- package:
- name: "{{ openshift.common.service_type }}-excluder"
- state: latest
+ - name: Install openshift excluder
+ package:
+ name: "{{ openshift.common.service_type }}-excluder"
+ state: "{{ openshift_excluder_package_state }}"
+ when:
+ - install_openshift_excluder | default(true) | bool
when:
- - docker_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
deleted file mode 100644
index 78a3d37cb..000000000
--- a/roles/openshift_excluder/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-include: status.yml
diff --git a/roles/openshift_excluder/tasks/reset.yml b/roles/openshift_excluder/tasks/reset.yml
deleted file mode 100644
index 486a23fd0..000000000
--- a/roles/openshift_excluder/tasks/reset.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Enable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder exclude"
- when:
- - docker_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
-
-- name: Enable excluder
- command: "{{ openshift.common.service_type }}-excluder exclude"
- when:
- - openshift_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
diff --git a/roles/openshift_excluder/tasks/status.yml b/roles/openshift_excluder/tasks/status.yml
deleted file mode 100644
index 6ef4af22d..000000000
--- a/roles/openshift_excluder/tasks/status.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-# Latest versions of the excluders include a status function, old packages dont
-# So, if packages are installed, upgrade them to the latest so we get the status
-# If they're not installed when we should assume they're disabled
-
-- name: Determine if excluder packages are installed
- rpm_q:
- name: "{{ openshift.common.service_type }}-excluder"
- state: present
- register: openshift_excluder_installed
- failed_when: false
-
-- name: Determine if docker packages are installed
- rpm_q:
- name: "{{ openshift.common.service_type }}-excluder"
- state: present
- register: docker_excluder_installed
- failed_when: false
-
-- name: Update to latest excluder packages
- package:
- name: "{{ openshift.common.service_type }}-excluder"
- when:
- - "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
-
-- name: Update to the latest docker-excluder packages
- package:
- name: "{{ openshift.common.service_type }}-docker-excluder"
- when:
- - "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
-
-- name: Record excluder status
- command: "{{ openshift.common.service_type }}-excluder"
- register: excluder_status
- when:
- - "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
- failed_when: false
-
-- name: Record docker excluder status
- command: "{{ openshift.common.service_type }}-docker-excluder"
- register: docker_excluder_status
- when:
- - "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
- failed_when: false
-
-- name: Set excluder status facts
- set_fact:
- docker_excluder_enabled: "{{ 'false' if docker_excluder_status.rc | default(0) == 0 or docker_excluder_installed.installed_versions | default(0) | length == 0 else 'true' }}"
- openshift_excluder_enabled: "{{ 'false' if docker_excluder_status.rc | default(0) == 0 or openshift_excluder_installed.installed_versions | default(0) | length == 0 else 'true' }}"
-
-- debug: var=docker_excluder_enabled
-- debug: var=openshift_excluder_enabled
diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml
index 38f0759aa..9112adbac 100644
--- a/roles/openshift_excluder/tasks/unexclude.yml
+++ b/roles/openshift_excluder/tasks/unexclude.yml
@@ -1,12 +1,17 @@
---
-- name: disable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder unexclude"
- when:
- - docker_excluder_enabled | bool
- - not openshift.common.is_containerized | bool
+# input variables:
+# - unexclude_docker_excluder
+# - unexclude_openshift_excluder
+- block:
+ - name: disable docker excluder
+ command: "{{ openshift.common.service_type }}-docker-excluder unexclude"
+ when:
+ - unexclude_docker_excluder | default(false) | bool
+
+ - name: disable openshift excluder
+ command: "{{ openshift.common.service_type }}-excluder unexclude"
+ when:
+ - unexclude_openshift_excluder | default(false) | bool
-- name: disable excluder
- command: "{{ openshift.common.service_type }}-excluder unexclude"
when:
- - openshift_excluder_enabled | bool
- - not openshift.common.is_containerized | bool
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 2503d6212..eeab8a99c 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -19,8 +19,8 @@ import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
-from six import string_types, text_type
-from six.moves import configparser
+from ansible.module_utils.six import string_types, text_type
+from ansible.module_utils.six.moves import configparser
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
@@ -485,6 +485,24 @@ def set_nuage_facts_if_unset(facts):
return facts
+def set_contiv_facts_if_unset(facts):
+    """ Set contiv facts if not already present in facts dict
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with the contiv
+                  facts if they were not already present
+    """
+ if 'common' in facts:
+ if 'use_contiv' not in facts['common']:
+ use_contiv = False
+ facts['common']['use_contiv'] = use_contiv
+ return facts
+
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -875,7 +893,7 @@ def set_version_facts_if_unset(facts):
version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
- version_gte_3_6_or_1_6 = version >= LooseVersion('1.6.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0') or version >= LooseVersion('1.6.0')
else:
version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
@@ -1936,6 +1954,7 @@ class OpenShiftFacts(object):
facts = set_project_cfg_facts_if_unset(facts)
facts = set_flannel_facts_if_unset(facts)
facts = set_nuage_facts_if_unset(facts)
+ facts = set_contiv_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
@@ -2300,14 +2319,19 @@ class OpenShiftFacts(object):
protected_facts_to_overwrite)
if 'docker' in new_local_facts:
- # remove duplicate and empty strings from registry lists
+ # remove duplicate and empty strings from registry lists, preserving order
for cat in ['additional', 'blocked', 'insecure']:
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
if isinstance(val, string_types):
val = [x.strip() for x in val.split(',')]
- new_local_facts['docker'][key] = list(set(val) - set(['']))
+ seen = set()
+ new_local_facts['docker'][key] = list()
+ for registry in val:
+ if registry not in seen and registry != '':
+ seen.add(registry)
+ new_local_facts['docker'][key].append(registry)
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
isinstance(new_local_facts['docker']['log_options'], string_types):
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 0ec294bbc..73c668c72 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -13,6 +13,8 @@
l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
+- set_fact:
+ l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
- name: Validate python version
fail:
@@ -26,11 +28,37 @@
msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
when: ansible_distribution != 'Fedora' and ansible_python['version']['major'] != 2
+# Fail as early as possible if Atomic and old version of Docker
+- block:
+
+ # See https://access.redhat.com/articles/2317361
+ # and https://github.com/ansible/ansible/issues/15892
+  # NOTE: the quotes cannot be removed at this level or the docker command will fail
+ # NOTE: When ansible >2.2.1.x is used this can be updated per
+ # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
+ - name: Determine Atomic Host Docker Version
+ shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+ register: l_atomic_docker_version
+
+ - assert:
+ msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
+ that:
+ - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+
+ when: l_is_atomic | bool
+
- name: Ensure various deps are installed
package: name={{ item }} state=present
with_items: "{{ required_packages }}"
when: not l_is_atomic | bool
+- name: Ensure various deps for running system containers are installed
+ package: name={{ item }} state=present
+ with_items: "{{ required_system_containers_packages }}"
+ when:
+ - not l_is_atomic | bool
+ - l_any_system_container | bool
+
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
role: common
diff --git a/roles/openshift_facts/vars/main.yml b/roles/openshift_facts/vars/main.yml
index 9c3110ff6..07f5100ad 100644
--- a/roles/openshift_facts/vars/main.yml
+++ b/roles/openshift_facts/vars/main.yml
@@ -5,3 +5,8 @@ required_packages:
- python-six
- PyYAML
- yum-utils
+
+required_system_containers_packages:
+ - atomic
+ - ostree
+ - runc
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 2c70438c9..8433923ed 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -2,13 +2,17 @@
Health checks for OpenShift clusters.
"""
+import operator
import os
+
from abc import ABCMeta, abstractmethod, abstractproperty
from importlib import import_module
-import operator
-import six
-from six.moves import reduce
+# add_metaclass is not available in the embedded six from module_utils in Ansible 2.2.1
+from six import add_metaclass
+# pylint import-error disabled because pylint cannot find the package
+# when installed in a virtualenv
+from ansible.module_utils.six.moves import reduce # pylint: disable=import-error, redefined-builtin
class OpenShiftCheckException(Exception):
@@ -16,7 +20,7 @@ class OpenShiftCheckException(Exception):
pass
-@six.add_metaclass(ABCMeta)
+@add_metaclass(ABCMeta)
class OpenShiftCheck(object):
"""A base class for defining checks for an OpenShift cluster environment."""
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 17a0d5301..0a6299c9b 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -1,2 +1,30 @@
---
registry_volume_claim: 'registry-claim'
+
+openshift_hosted_router_edits:
+- key: spec.strategy.rollingParams.intervalSeconds
+ value: 1
+ action: put
+- key: spec.strategy.rollingParams.updatePeriodSeconds
+ value: 1
+ action: put
+- key: spec.strategy.activeDeadlineSeconds
+ value: 21600
+ action: put
+
+openshift_hosted_routers:
+- name: router
+ replicas: "{{ replicas }}"
+ namespace: default
+ serviceaccount: router
+ selector: "{{ openshift_hosted_router_selector }}"
+ images: "{{ openshift_hosted_router_image }}"
+ edits: "{{ openshift_hosted_router_edits }}"
+ stats_port: 1936
+ ports:
+ - 80:80
+ - 443:443
+ certificates: "{{ openshift_hosted_router_certificate | default({}) }}"
+
+
+openshift_hosted_router_certificates: {}
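Because `openshift_hosted_routers` is a list, an inventory can replace or extend the default entry wholesale. A sketch of one customized entry with illustrative selector and replica values (the keys mirror the default above):

```yaml
# Sketch: inventory override pinning the default router's shape explicitly.
openshift_hosted_routers:
- name: router
  replicas: 2
  namespace: default
  serviceaccount: router
  selector: "region=infra"
  images: "{{ openshift_hosted_router_image }}"
  edits: "{{ openshift_hosted_router_edits }}"
  stats_port: 1936
  ports:
  - 80:80
  - 443:443
  certificates: "{{ openshift_hosted_router_certificate | default({}) }}"
```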
diff --git a/roles/openshift_hosted/filter_plugins/filters.py b/roles/openshift_hosted/filter_plugins/filters.py
new file mode 100644
index 000000000..7f41529ac
--- /dev/null
+++ b/roles/openshift_hosted/filter_plugins/filters.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use in openshift_hosted
+'''
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use by openshift_hosted role'''
+
+ @staticmethod
+ def get_router_replicas(replicas=None, router_nodes=None):
+        ''' Return the number of router replicas, taken from
+            openshift.hosted.router.replicas when defined, otherwise
+            from the number of nodes returned by the oc_obj query
+            against the router selector, otherwise defaulting to 1.
+        '''
+ # We always use what they've specified if they've specified a value
+ if replicas is not None:
+ return replicas
+
+ replicas = 1
+
+ # Ignore boolean expression limit of 5.
+ # pylint: disable=too-many-boolean-expressions
+ if (isinstance(router_nodes, dict) and
+ 'results' in router_nodes and
+ 'results' in router_nodes['results'] and
+ isinstance(router_nodes['results']['results'], list) and
+ len(router_nodes['results']['results']) > 0 and
+ 'items' in router_nodes['results']['results'][0]):
+
+ if len(router_nodes['results']['results'][0]['items']) > 0:
+ replicas = len(router_nodes['results']['results'][0]['items'])
+
+ return replicas
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {'get_router_replicas': self.get_router_replicas}
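A sketch of how the filter is meant to be fed: an `oc_obj` node query (the same module the registry task rewrite later in this diff uses) registers `router_nodes`, and the filter falls back through the explicit replica count, then the matching-node count, then 1. The task names and selector fact below are illustrative:

```yaml
# Sketch: derive the router replica count from matching nodes unless
# openshift.hosted.router.replicas is set explicitly.
- name: Retrieve nodes matching the router selector
  oc_obj:
    state: list
    kind: node
    selector: "{{ openshift.hosted.router.selector | default(omit) }}"
  register: router_nodes
  when: openshift.hosted.router.replicas | default(none) is none

- name: Compute the router replica count
  set_fact:
    replicas: "{{ openshift.hosted.router.replicas | default(none) | get_router_replicas(router_nodes) }}"
```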
diff --git a/roles/openshift_hosted/handlers/main.yml b/roles/openshift_hosted/handlers/main.yml
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_hosted/handlers/main.yml
+++ /dev/null
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index afea0ac59..bbbb76414 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -17,18 +17,3 @@ dependencies:
- role: lib_openshift
- role: openshift_projects
openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
-- role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - hostnetwork
- when: openshift.common.version_gte_3_2_or_1_2
-- role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - privileged
- when: not openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml
index 67c6bbfd7..fe254f72d 100644
--- a/roles/openshift_hosted/tasks/main.yml
+++ b/roles/openshift_hosted/tasks/main.yml
@@ -1,25 +1,6 @@
---
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- set_fact:
- openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
- changed_when: False
-
- include: router/router.yml
when: openshift_hosted_manage_router | default(true) | bool
- include: registry/registry.yml
when: openshift_hosted_manage_registry | default(true) | bool
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 93b701ebc..0b8042473 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -1,64 +1,125 @@
---
-- name: Retrieve list of openshift nodes matching registry selector
- command: >
- {{ openshift.common.client_binary }} --api-version='v1' -o json
- get nodes -n default --config={{ openshift_hosted_kubeconfig }}
- --selector={{ openshift.hosted.registry.selector | default('') }}
- register: registry_nodes_json
- changed_when: false
+- block:
+
+ - name: Retrieve list of openshift nodes matching registry selector
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift.hosted.registry.selector | default(omit) }}"
+ register: registry_nodes
+
+ - name: set_fact l_node_count to number of nodes matching registry selector
+ set_fact:
+ l_node_count: "{{ registry_nodes.results.results[0]['items'] | length }}"
+
+ # Determine the default number of registry/router replicas to use if no count
+ # has been specified.
+ # If no registry nodes defined, the default should be 0.
+ - name: set_fact l_default_replicas when l_node_count == 0
+ set_fact:
+ l_default_replicas: 0
+ when: l_node_count | int == 0
+
+ # If registry nodes are defined and the registry storage kind is
+ # defined, default should be the number of registry nodes, otherwise
+ # just 1:
+ - name: set_fact l_default_replicas when l_node_count > 0
+ set_fact:
+ l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}"
+ when: l_node_count | int > 0
+
when: openshift.hosted.registry.replicas | default(none) is none
-- set_fact:
- l_node_count: "{{ (registry_nodes_json.stdout | default('{\"items\":[]}') | from_json)['items'] | length }}"
+- name: set openshift_hosted facts
+ set_fact:
+ openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
+ openshift_hosted_registry_name: docker-registry
+ openshift_hosted_registry_serviceaccount: registry
+ openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}"
+ openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}"
+ openshift_hosted_registry_volumes: []
+ openshift_hosted_registry_env_vars: {}
+ openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}"
+ openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
+ openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
+ openshift_hosted_registry_edits:
+ # These edits are being specified only to prevent 'changed' on rerun
+ - key: spec.strategy.rollingParams
+ value:
+ intervalSeconds: 1
+ maxSurge: "25%"
+ maxUnavailable: "25%"
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ action: put
+ openshift_hosted_registry_force:
+ - False
-# Determine the default number of registry/router replicas to use if no count
-# has been specified.
-# If no registry nodes defined, the default should be 0.
-- set_fact:
- l_default_replicas: 0
- when: l_node_count | int == 0
+- name: Create the registry service account
+ oc_serviceaccount:
+ name: "{{ openshift_hosted_registry_serviceaccount }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
-# If registry nodes are defined and the registry storage kind is
-# defined, default should be the number of registry nodes, otherwise
-# just 1:
-- set_fact:
- l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}"
- when: l_node_count | int > 0
+- name: Grant the registry service account access to the appropriate scc
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ resource_kind: scc
+ resource_name: hostnetwork
-- set_fact:
- replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
+- name: oc adm policy add-cluster-role-to-user system:registry system:serviceaccount:default:registry
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ resource_kind: cluster-role
+ resource_name: system:registry
-- name: Create OpenShift registry
- command: >
- {{ openshift.common.client_binary }} adm registry --create
- --config={{ openshift_hosted_kubeconfig }}
- {% if replicas > 1 -%}
- --replicas={{ replicas }}
- {% endif -%}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- --service-account=registry
- {% if openshift.hosted.registry.selector | default(none) is not none -%}
- --selector='{{ openshift.hosted.registry.selector }}'
- {% endif -%}
- {% if not openshift.common.version_gte_3_2_or_1_2 | bool -%}
- --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig
- {% endif -%}
- {% if openshift.hosted.registry.registryurl | default(none) is not none -%}
- --images='{{ openshift.hosted.registry.registryurl }}'
- {% endif -%}
- register: openshift_hosted_registry_results
- changed_when: "'service exists' not in openshift_hosted_registry_results.stdout"
- failed_when: "openshift_hosted_registry_results.rc != 0 and 'service exists' not in openshift_hosted_registry_results.stdout and 'deployment_config' not in openshift_hosted_registry_results.stderr and 'service' not in openshift_hosted_registry_results.stderr"
- when: replicas | int > 0
+- name: create the default registry service
+ oc_service:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ ports:
+ - name: 5000-tcp
+ port: 5000
+ protocol: TCP
+ targetPort: 5000
+ selector:
+ docker-registry: default
+ session_affinity: ClientIP
+ service_type: ClusterIP
- include: secure.yml
static: no
- when: replicas | int > 0 and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ run_once: true
+ when:
+ - not (openshift.docker.hosted_registry_insecure | default(false) | bool)
- include: storage/object_storage.yml
static: no
- when: replicas | int > 0 and openshift.hosted.registry.storage.kind | default(none) == 'object'
+ when:
+ - openshift.hosted.registry.storage.kind | default(none) == 'object'
-- include: storage/persistent_volume.yml
- static: no
- when: replicas | int > 0 and openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+- name: Update openshift_hosted facts for persistent volumes
+ set_fact:
+ openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(pvc_volume_mounts) }}"
+ vars:
+ pvc_volume_mounts:
+ - name: registry-storage
+ type: persistentVolumeClaim
+ claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ when:
+ - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+
+- name: Create OpenShift registry
+ oc_adm_registry:
+ name: "{{ openshift_hosted_registry_name }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ selector: "{{ openshift_hosted_registry_selector }}"
+ replicas: "{{ openshift_hosted_registry_replicas }}"
+ service_account: "{{ openshift_hosted_registry_serviceaccount }}"
+ images: "{{ openshift_hosted_registry_images }}"
+ env_vars: "{{ openshift_hosted_registry_env_vars }}"
+ volume_mounts: "{{ openshift_hosted_registry_volumes }}"
+ edits: "{{ openshift_hosted_registry_edits }}"
+ force: "{{ True|bool in openshift_hosted_registry_force }}"
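
The replica-defaulting rules spelled out in the comments above reduce to a small decision table: no matching nodes means zero replicas; otherwise one replica, or one per matching node when persistent registry storage is configured. A minimal Python sketch of the same logic (the helper name is hypothetical, not part of the role):

    # Sketch of the registry replica defaulting above; only applies when no
    # explicit replica count was supplied.
    def default_registry_replicas(node_count, storage_kind=None):
        if node_count == 0:
            return 0
        return node_count if storage_kind is not None else 1

    assert default_registry_replicas(0) == 0
    assert default_registry_replicas(3) == 1
    assert default_registry_replicas(3, storage_kind='nfs') == 3
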
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index 8b44b94c6..f9ea2ebeb 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -1,132 +1,105 @@
---
-- name: Create passthrough route for docker-registry
+- name: Set fact docker_registry_route_hostname
+ set_fact:
+ docker_registry_route_hostname: "{{ 'docker-registry-default.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+
+- name: Get the certificate contents for registry
+ copy:
+ backup: True
+ dest: "/etc/origin/master/named_certificates/{{ item.value | basename }}"
+ src: "{{ item.value }}"
+ when: item.key in ['certfile', 'keyfile', 'cafile'] and item.value
+ with_dict: "{{ openshift_hosted_registry_routecertificates }}"
+
+# When certificates are defined we will create the reencrypt
+# docker-registry route
+- name: Create a reencrypt route for docker-registry
oc_route:
- kubeconfig: "{{ openshift_hosted_kubeconfig }}"
name: docker-registry
- namespace: default
+ namespace: "{{ openshift_hosted_registry_namespace }}"
service_name: docker-registry
- state: present
- tls_termination: passthrough
- run_once: true
+ tls_termination: "{{ openshift_hosted_registry_routetermination }}"
+ host: "{{ openshift_hosted_registry_routehost | default(docker_registry_route_hostname) }}"
+ cert_path: "/etc/origin/master/named_certificates/{{ openshift_hosted_registry_routecertificates['certfile'] | basename }}"
+ key_path: "/etc/origin/master/named_certificates/{{ openshift_hosted_registry_routecertificates['keyfile'] | basename }}"
+ cacert_path: "/etc/origin/master/named_certificates/{{ openshift_hosted_registry_routecertificates['cafile'] | basename }}"
+ dest_cacert_path: /etc/origin/master/ca.crt
+ when:
+ - "'cafile' in openshift_hosted_registry_routecertificates"
+ - "'certfile' in openshift_hosted_registry_routecertificates"
+ - "'keyfile' in openshift_hosted_registry_routecertificates"
-- name: Determine if registry certificate must be created
- stat:
- path: "{{ openshift_master_config_dir }}/{{ item }}"
- with_items:
- - registry.crt
- - registry.key
- register: docker_registry_certificates_stat_result
- changed_when: false
- failed_when: false
+# When routetermination is passthrough we will create the route
+- name: Create passthrough route for docker-registry
+ oc_route:
+ name: docker-registry
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ service_name: docker-registry
+ tls_termination: "{{ openshift_hosted_registry_routetermination }}"
+ host: "{{ openshift_hosted_registry_routehost | ternary(openshift_hosted_registry_routehost, docker_registry_route_hostname) }}"
+ when: openshift_hosted_registry_routetermination == 'passthrough'
- name: Retrieve registry service IP
oc_service:
- namespace: default
+ namespace: "{{ openshift_hosted_registry_namespace }}"
name: docker-registry
state: list
register: docker_registry_service_ip
- changed_when: false
-- set_fact:
- docker_registry_route_hostname: "{{ 'docker-registry-default.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
-
-- name: Create registry certificates if they do not exist
- command: >
- {{ openshift.common.client_binary }} adm ca create-server-cert
- --signer-cert={{ openshift_master_config_dir }}/ca.crt
- --signer-key={{ openshift_master_config_dir }}/ca.key
- --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt
- --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
- --cert={{ openshift_master_config_dir }}/registry.crt
- --key={{ openshift_master_config_dir }}/registry.key
- when: False in (docker_registry_certificates_stat_result.results | default([]) | oo_collect(attribute='stat.exists') | list)
+- name: Create registry certificates
+ oc_adm_ca_server_cert:
+ signer_cert: "{{ openshift_master_config_dir }}/ca.crt"
+ signer_key: "{{ openshift_master_config_dir }}/ca.key"
+ signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+ hostnames:
+ - "{{ docker_registry_service_ip.results.clusterip }}"
+ - docker-registry.default.svc.cluster.local
+ - "{{ docker_registry_route_hostname }}"
+ cert: "{{ openshift_master_config_dir }}/registry.crt"
+ key: "{{ openshift_master_config_dir }}/registry.key"
+ register: server_cert_out
- name: Create the secret for the registry certificates
oc_secret:
- kubeconfig: "{{ openshift_hosted_kubeconfig }}"
name: registry-certificates
- namespace: default
- state: present
+ namespace: "{{ openshift_hosted_registry_namespace }}"
files:
- name: registry.crt
path: "{{ openshift_master_config_dir }}/registry.crt"
- name: registry.key
path: "{{ openshift_master_config_dir }}/registry.key"
- register: create_registry_certificates_secret
- run_once: true
+ register: create_registry_certificates_secret_out
-- name: "Add the secret to the registry's pod service accounts"
+- name: Add the secret to the registry's pod service accounts
oc_serviceaccount_secret:
service_account: "{{ item }}"
secret: registry-certificates
- namespace: default
- kubeconfig: "{{ openshift_hosted_kubeconfig }}"
- state: present
+ namespace: "{{ openshift_hosted_registry_namespace }}"
with_items:
- registry
- default
-- name: Determine if registry-certificates secret volume attached
- command: >
- {{ openshift.common.client_binary }} get dc/docker-registry
- -o jsonpath='{.spec.template.spec.volumes[?(@.secret)].secret.secretName}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_volumes
- changed_when: false
- failed_when: "docker_registry_volumes.stdout != '' and 'secretName is not found' not in docker_registry_volumes.stdout and docker_registry_volumes.rc != 0"
-
-- name: Attach registry-certificates secret volume
- command: >
- {{ openshift.common.client_binary }} volume dc/docker-registry --add --type=secret
- --secret-name=registry-certificates
- -m /etc/secrets
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- when: "'registry-certificates' not in docker_registry_volumes.stdout"
-
-- name: Determine if registry environment variables must be set
- command: >
- {{ openshift.common.client_binary }} env dc/docker-registry
- --list
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_env
- changed_when: false
-
-- name: Configure certificates in registry deplomentConfig
- command: >
- {{ openshift.common.client_binary }} env dc/docker-registry
- REGISTRY_HTTP_TLS_CERTIFICATE=/etc/secrets/registry.crt
- REGISTRY_HTTP_TLS_KEY=/etc/secrets/registry.key
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- when: "'REGISTRY_HTTP_TLS_CERTIFICATE=/etc/secrets/registry.crt' not in docker_registry_env.stdout or 'REGISTRY_HTTP_TLS_KEY=/etc/secrets/registry.key' not in docker_registry_env.stdout"
-
-- name: Determine if registry liveness probe scheme is HTTPS
- command: >
- {{ openshift.common.client_binary }} get dc/docker-registry
- -o jsonpath='{.spec.template.spec.containers[*].livenessProbe.httpGet.scheme}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_liveness_probe
- changed_when: false
-
-# This command is on a single line to preserve patch json.
-- name: Update registry liveness probe from HTTP to HTTPS
- command: "{{ openshift.common.client_binary }} patch dc/docker-registry --api-version=v1 -p '{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"registry\",\"livenessProbe\":{\"httpGet\":{\"scheme\":\"HTTPS\"}}}]}}}}' --config={{ openshift_hosted_kubeconfig }} -n default"
- when: "'HTTPS' not in docker_registry_liveness_probe.stdout"
-
-- name: Determine if registry readiness probe scheme is HTTPS
- command: >
- {{ openshift.common.client_binary }} get dc/docker-registry
- -o jsonpath='{.spec.template.spec.containers[*].readinessProbe.httpGet.scheme}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_readiness_probe
- changed_when: false
+- name: Set facts for secure registry
+ set_fact:
+ registry_secure_volume_mounts:
+ - name: registry-certificates
+ path: /etc/secrets
+ type: secret
+ secret_name: registry-certificates
+ registry_secure_env_vars:
+ REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
+ REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
+ registry_secure_edits:
+ - key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
+ value: HTTPS
+ action: put
+ - key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
+ value: HTTPS
+ action: put
-# This command is on a single line to preserve patch json.
-- name: Update registry readiness probe from HTTP to HTTPS
- command: "{{ openshift.common.client_binary }} patch dc/docker-registry --api-version=v1 -p '{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"registry\",\"readinessProbe\":{\"httpGet\":{\"scheme\":\"HTTPS\"}}}]}}}}' --config={{ openshift_hosted_kubeconfig }} -n default"
- when: "'HTTPS' not in docker_registry_readiness_probe.stdout"
+- name: Update openshift_hosted facts with secure registry variables
+ set_fact:
+ openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(registry_secure_volume_mounts) }}"
+ openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine(registry_secure_env_vars) }}"
+ openshift_hosted_registry_edits: "{{ openshift_hosted_registry_edits | union(registry_secure_edits) }}"
+ openshift_hosted_registry_force: "{{ openshift_hosted_registry_force | union([server_cert_out.changed]) | union([create_registry_certificates_secret_out.changed]) }}"
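
The secure-registry tasks feed back into the registry deployment through fact accumulation: volume mounts and env vars are unioned in, and the force list collects each task's changed flag so the final oc_adm_registry call redeploys only when something actually moved. A rough Python model of that pattern (names illustrative):

    def union(flags, changed):
        # mirrors "... | union([task.changed])" in the tasks above: Jinja's
        # union drops duplicates and keeps order
        return flags + [changed] if changed not in flags else flags

    force_flags = [False]                    # openshift_hosted_registry_force default
    force_flags = union(force_flags, True)   # e.g. the server cert task reported changed
    force_flags = union(force_flags, False)  # e.g. the secret task reported no change
    # equivalent of: force: "{{ True|bool in openshift_hosted_registry_force }}"
    assert (True in force_flags) is True
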
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 15128784e..3dde83bee 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -1,105 +1,52 @@
---
-- fail:
+- name: Assert supported openshift.hosted.registry.storage.provider
+ assert:
+ that:
+ - openshift.hosted.registry.storage.provider in ['azure_blob', 's3', 'swift']
msg: >
- Object Storage Provider: {{ openshift.hosted.registry.storage.provider }}
+ Object Storage Provider: "{{ openshift.hosted.registry.storage.provider }}"
is not currently supported
- when: openshift.hosted.registry.storage.provider not in ['azure_blob', 's3', 'swift']
-- fail:
+- name: Assert implemented openshift.hosted.registry.storage.provider
+ assert:
+ that:
+ - openshift.hosted.registry.storage.provider not in ['azure_blob', 'swift']
msg: >
Support for provider: "{{ openshift.hosted.registry.storage.provider }}"
not implemented yet
- when: openshift.hosted.registry.storage.provider in ['azure_blob', 'swift']
- include: s3.yml
when: openshift.hosted.registry.storage.provider == 's3'
-- name: Test if docker registry config secret exists
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- get secrets {{ registry_config_secret_name }} -o json
- register: secrets
- changed_when: false
- failed_when: false
-
-- set_fact:
- registry_config: "{{ lookup('template', 'registry_config.j2') | b64encode }}"
-
-- set_fact:
- registry_config_secret: "{{ lookup('template', 'registry_config_secret.j2') | from_yaml }}"
-
-- set_fact:
- same_storage_provider: "{{ (secrets.stdout|from_json)['metadata']['annotations']['provider'] | default(none) == openshift.hosted.registry.storage.provider }}"
- when: secrets.rc == 0
-
-- name: Update registry config secret
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- patch secret/{{ registry_config_secret_name }}
- -p '{"data": {"config.yml": "{{ registry_config }}"}}'
- register: update_config_secret
- when: secrets.rc == 0 and (secrets.stdout|from_json)['data']['config.yml'] != registry_config and same_storage_provider | bool
-
-- name: Create registry config secret
- shell: >
- echo '{{ registry_config_secret |to_json }}' |
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- create -f -
- when: secrets.rc == 1
+- name: Ensure the registry secret exists
+ oc_secret:
+ name: "{{ registry_config_secret_name }}"
+ state: present
+ contents:
+ - path: /tmp/config.yml
+ data: "{{ lookup('template', 'registry_config.j2') }}"
+ register: registry_config_out
- name: Add secrets to registry service account
oc_serviceaccount_secret:
service_account: registry
secret: "{{ registry_config_secret_name }}"
- namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
- kubeconfig: "{{ openshift_hosted_kubeconfig }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
state: present
-
-- name: Determine if deployment config contains secrets
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- set volumes dc/docker-registry --list
- register: volume
- changed_when: false
-
-- name: Add secrets to registry deployment config
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- set volumes dc/docker-registry --add --name=docker-config -m /etc/registry
- --type=secret --secret-name={{ registry_config_secret_name }}
- when: registry_config_secret_name not in volume.stdout
-
-- name: Determine if registry environment variable needs to be created
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- set env --list dc/docker-registry
- register: oc_env
- changed_when: false
-
-- name: Add registry environment variable
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- set env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registry/config.yml
- when: "'REGISTRY_CONFIGURATION_PATH' not in oc_env.stdout"
-
-- name: Redeploy registry
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- deploy dc/docker-registry --latest
- when: secrets.rc == 0 and not update_config_secret | skipped and update_config_secret.rc == 0 and same_storage_provider | bool
+ register: svcac
+
+- name: Set facts for registry object storage
+ set_fact:
+ registry_obj_storage_volume_mounts:
+ - name: docker-config
+ path: /etc/registry
+ type: secret
+ secret_name: "{{ registry_config_secret_name }}"
+ registry_obj_storage_env_vars:
+ REGISTRY_CONFIGURATION_PATH: /etc/registry/config.yml
+
+- name: Update openshift_hosted registry facts for storage
+ set_fact:
+ openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(registry_obj_storage_volume_mounts) }}"
+ openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine(registry_obj_storage_env_vars) }}"
+ openshift_hosted_registry_force: "{{ openshift_hosted_registry_force | union([registry_config_out.changed]) | union([svcac.changed]) }}"
diff --git a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml b/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
deleted file mode 100644
index 0172f5ca0..000000000
--- a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- set_fact:
- registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
-
-- name: Determine if volume is already attached to dc/docker-registry
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- get -o template dc/docker-registry --template=\\{\\{.spec.template.spec.volumes\\}\\} --output-version=v1
- changed_when: false
- failed_when: false
- register: registry_volumes_output
-
-- set_fact:
- volume_attached: "{{ registry_volume_claim in (registry_volumes_output).stdout | default(['']) }}"
-
-- name: Add volume to dc/docker-registry
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- volume dc/docker-registry
- --add --overwrite -t persistentVolumeClaim --claim-name={{ registry_volume_claim }}
- --name=registry-storage
- when: not volume_attached | bool
diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/registry/storage/s3.yml
index 16709dfef..26f921f15 100644
--- a/roles/openshift_hosted/tasks/registry/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/s3.yml
@@ -1,47 +1,49 @@
---
-- fail:
- msg: >
- openshift_hosted_registry_storage_s3_accesskey and
- openshift_hosted_registry_storage_s3_secretkey are required
- when: openshift.hosted.registry.storage.s3.accesskey | default(none) is none or openshift.hosted.registry.storage.s3.secretkey | default(none) is none
-
-- fail:
- msg: >
- openshift_hosted_registry_storage_s3_bucket and
- openshift_hosted_registry_storage_s3_region are required
- when: openshift.hosted.registry.storage.s3.bucket | default(none) is none or openshift.hosted.registry.storage.s3.region | default(none) is none
+- name: Assert that S3 variables are provided for registry_config template
+ assert:
+ that:
+ - openshift.hosted.registry.storage.s3.accesskey | default(none) is not none
+ - openshift.hosted.registry.storage.s3.secretkey | default(none) is not none
+ - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
+ - openshift.hosted.registry.storage.s3.region | default(none) is not none
+ msg: |
+ When using S3 storage, the following variables are required:
+ openshift_hosted_registry_storage_s3_accesskey
+ openshift_hosted_registry_storage_s3_secretkey
+ openshift_hosted_registry_storage_s3_bucket
+ openshift_hosted_registry_storage_s3_region
-# If cloudfront is being used, fail if we don't have all the required variables
-- assert:
+- name: If cloudfront is being used, assert that we have all the required variables
+ assert:
that:
- - "openshift_hosted_registry_storage_s3_cloudfront_baseurl is not defined or openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile | default(none) is not none"
- - "openshift_hosted_registry_storage_s3_cloudfront_baseurl is not defined or openshift_hosted_registry_storage_s3_cloudfront_keypairid | default(none) is not none"
- msg: >
+ - "openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile | default(none) is not none"
+ - "openshift_hosted_registry_storage_s3_cloudfront_keypairid | default(none) is not none"
+ msg: |
When openshift_hosted_registry_storage_s3_cloudfront_baseurl is provided
- openshift_hosted_registry_storage_s3_cloudfront_keypairid and
- openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile are required
-
+ openshift_hosted_registry_storage_s3_cloudfront_keypairid and
+ openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile are required
+ when: openshift_hosted_registry_storage_s3_cloudfront_baseurl is defined
# Inject the cloudfront private key as a secret when required
- block:
- - name: Create registry secret for cloudfront
- oc_secret:
- state: present
- namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
- name: docker-registry-s3-cloudfront
- contents:
- - path: cloudfront.pem
- data: "{{ lookup('file', openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile) }}"
+ - name: Create registry secret for cloudfront
+ oc_secret:
+ state: present
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: docker-registry-s3-cloudfront
+ contents:
+ - path: cloudfront.pem
+ data: "{{ lookup('file', openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile) }}"
- - name: Add cloudfront secret to the registry deployment config
- command: >
- oc volume dc/docker-registry --add --name=cloudfront-vol
- --namespace="{{ openshift.hosted.registry.namespace | default('default') }}"
- -m /etc/origin --type=secret --secret-name=docker-registry-s3-cloudfront
- register: cloudfront_vol_attach
- failed_when:
- - "'already exists' not in cloudfront_vol_attach.stderr"
- - "cloudfront_vol_attach.rc != 0"
+ - name: Append cloudfront secret registry volume to openshift_hosted_registry_volumes
+ set_fact:
+ openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(s3_volume_mount) }}"
+ vars:
+ s3_volume_mount:
+ - name: cloudfront-vol
+ path: /etc/origin
+ type: secret
+ secret_name: docker-registry-s3-cloudfront
when: openshift_hosted_registry_storage_s3_cloudfront_baseurl | default(none) is not none
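
The CloudFront guard above only demands the extra keys once a baseurl has been supplied. Sketched in Python (the function and settings dict are hypothetical):

    def validate_cloudfront(settings):
        if settings.get('baseurl') is None:
            return  # CloudFront not requested; nothing to check
        missing = [k for k in ('privatekeyfile', 'keypairid') if not settings.get(k)]
        if missing:
            raise ValueError('cloudfront requires: {}'.format(', '.join(missing)))

    validate_cloudfront({})  # fine: no baseurl, so no extra requirements
    validate_cloudfront({'baseurl': 'https://d111.cloudfront.net',
                         'privatekeyfile': '/path/key.pem', 'keypairid': 'ABC123'})
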
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 3d5713d6b..0861b9ec2 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -1,104 +1,80 @@
---
-- fail:
- msg: "'certfile', 'keyfile' and 'cafile' keys must be specified when supplying the openshift_hosted_router_certificate variable."
- when: openshift_hosted_router_certificate is defined and ('certfile' not in openshift_hosted_router_certificate or 'keyfile' not in openshift_hosted_router_certificate or 'cafile' not in openshift_hosted_router_certificate)
-
-- name: Read router certificate and key
- become: no
- local_action:
- module: slurp
- src: "{{ item }}"
- register: openshift_router_certificate_output
- # Defaulting dictionary keys to none to avoid deprecation warnings
- # (future fatal errors) during template evaluation. Dictionary keys
- # won't be accessed unless openshift_hosted_router_certificate is
- # defined and has all keys (certfile, keyfile, cafile) which we
- # check above.
- with_items:
- - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
- when: openshift_hosted_router_certificate is defined
+- name: Retrieve list of openshift nodes matching router selector
+ oc_obj:
+ state: list
+ kind: node
+ namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
+ selector: "{{ openshift.hosted.router.selector | default(omit) }}"
+ register: router_nodes
+ when: openshift.hosted.router.replicas | default(none) is none
-- name: Persist certificate contents
- openshift_facts:
- role: hosted
- openshift_env:
- openshift_hosted_router_certificate_contents: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
- when: openshift_hosted_router_certificate is defined
+- name: set_fact replicas
+ set_fact:
+ replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}"
+ openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
+ openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
-- name: Create PEM certificate
+- name: Get the certificate contents for router
copy:
- content: "{{ openshift.hosted.router.certificate.contents }}"
- dest: "{{ openshift_master_config_dir }}/openshift-router.pem"
- mode: 0600
- when: "'certificate' in openshift.hosted.router and 'contents' in openshift.hosted.router.certificate"
+ backup: True
+ dest: "/etc/origin/master/{{ item | basename }}"
+ src: "{{ item }}"
+ with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') |
+ oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
-- name: Retrieve list of openshift nodes matching router selector
- command: >
- {{ openshift.common.client_binary }} --api-version='v1' -o json
- get nodes -n default --config={{ openshift_hosted_kubeconfig }}
- --selector={{ openshift.hosted.router.selector | default('') }}
- register: router_nodes_json
- changed_when: false
- when: openshift.hosted.router.replicas | default(none) is none
+- name: Create the router service account(s)
+ oc_serviceaccount:
+ name: "{{ item.serviceaccount }}"
+ namespace: "{{ item.namespace }}"
+ state: present
+ with_items: "{{ openshift_hosted_routers }}"
-- set_fact:
- replicas: "{{ openshift.hosted.router.replicas | default((router_nodes_json.stdout | default('{\"items\":[]}') | from_json)['items'] | length) }}"
+- name: Grant the router service account(s) access to the appropriate scc
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ item.namespace }}:{{ item.serviceaccount }}"
+ namespace: "{{ item.namespace }}"
+ resource_kind: scc
+ resource_name: hostnetwork
+ with_items: "{{ openshift_hosted_routers }}"
-- name: Create OpenShift router
- command: >
- {{ openshift.common.client_binary }} adm router --create
- --config={{ openshift_hosted_kubeconfig }}
- {% if replicas > 1 -%}
- --replicas={{ replicas }}
- {% endif -%}
- {% if 'certificate' in openshift.hosted.router and 'contents' in openshift.hosted.router.certificate -%}
- --default-cert={{ openshift_master_config_dir }}/openshift-router.pem
- {% endif -%}
- --namespace={{ openshift.hosted.router.namespace | default('default') }}
- {% if openshift.hosted.router.force_subdomain | default(none) is not none %}
- --force-subdomain={{ openshift.hosted.router.force_subdomain }}
- {% endif %}
- --service-account=router
- {% if openshift.hosted.router.selector | default(none) is not none -%}
- --selector='{{ openshift.hosted.router.selector }}'
- {% endif -%}
- {% if not openshift.common.version_gte_3_2_or_1_2 | bool -%}
- --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig
- {% endif -%}
- {% if openshift.hosted.router.registryurl | default(none) is not none -%}
- --images='{{ openshift.hosted.router.registryurl }}'
- {% endif -%}
- {% if openshift.hosted.router.name | default(none) is not none -%}
- {{ openshift.hosted.router.name }}
- {% endif -%}
+- name: Set additional permissions for router service account
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ item.namespace }}:{{ item.serviceaccount }}"
+ namespace: "{{ item.namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ when: item.namespace == 'default'
+ with_items: "{{ openshift_hosted_routers }}"
- register: openshift_hosted_router_results
- changed_when: "'service exists' not in openshift_hosted_router_results.stdout"
- failed_when: "openshift_hosted_router_results.rc != 0 and 'service exists' not in openshift_hosted_router_results.stdout and 'deployment_config' not in openshift_hosted_router_results.stderr and 'service' not in openshift_hosted_router_results.stderr"
- when: replicas | int > 0
+- name: Create OpenShift router
+ oc_adm_router:
+ name: "{{ item.name }}"
+ replicas: "{{ item.replicas }}"
+ namespace: "{{ item.namespace | default('default') }}"
+ # This option is not yet implemented
+ # force_subdomain: "{{ openshift.hosted.router.force_subdomain | default(none) }}"
+ service_account: "{{ item.serviceaccount | default('router') }}"
+ selector: "{{ item.selector | default(none) }}"
+ images: "{{ item.images | default(omit) }}"
+ cert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else omit }}"
+ key_file: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else omit }}"
+ cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.cafile | basename)) if 'cafile' in item.certificates else omit }}"
+ edits: "{{ openshift_hosted_router_edits | union(item.edits) }}"
+ ports: "{{ item.ports }}"
+ stats_port: "{{ item.stats_port }}"
+ with_items: "{{ openshift_hosted_routers }}"
+ register: routerout
-- command: >
- {{ openshift.common.client_binary }}
- {% if openshift.hosted.router.name | default(none) is not none -%}
- get dc/{{ openshift.hosted.router.name }}
- {% else %}
- get dc/router
- {% endif%}
- --template=\\{\\{.spec.replicas\\}\\}
- --namespace={{ openshift.hosted.router.namespace | default('default') }}
- register: current_replicas
- when: replicas | int > 0
+# This should probably move to module
+- name: wait for deploy
+ pause:
+ seconds: 30
+ when: routerout.changed
- name: Ensure router replica count matches desired
- command: >
- {{ openshift.common.client_binary }}
- scale --replicas={{ replicas }}
- {% if openshift.hosted.router.name | default(none) is not none -%}
- dc/{{ openshift.hosted.router.name }}
- {% else %}
- dc/router
- {% endif%}
- --namespace={{ openshift.hosted.router.namespace | default('default') }}
- when: replicas | int > 0 and replicas | int != current_replicas.stdout | int
+ oc_scale:
+ kind: dc
+ name: "{{ item.name | default('router') }}"
+ namespace: "{{ item.namespace | default('default') }}"
+ replicas: "{{ item.replicas }}"
+ with_items: "{{ openshift_hosted_routers }}"
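
The get_router_replicas filter replaces the removed shell pipeline, which defaulted the replica count to the number of nodes matching the router selector. Assuming the filter keeps those semantics, a rough approximation (this is a sketch, not the filter's source; the result shape matches the registered oc_obj output used for the registry above):

    def get_router_replicas(replicas, router_nodes):
        if replicas is not None:
            return int(replicas)
        items = (router_nodes or {}).get('results', {}).get('results', [{}])[0].get('items', [])
        return len(items)

    assert get_router_replicas(2, None) == 2
    assert get_router_replicas(None, {'results': {'results': [{'items': [{}, {}]}]}}) == 2
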
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 5440a3647..04fd42cbf 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -1,11 +1,12 @@
---
openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
-openshift_logging_use_ops: False
+openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}"
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
-openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' + openshift.master.api_port) }}"
+openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
openshift_logging_namespace: logging
openshift_logging_install_logging: True
+openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
openshift_logging_curator_default_days: 30
openshift_logging_curator_run_hour: 0
@@ -15,11 +16,13 @@ openshift_logging_curator_script_log_level: INFO
openshift_logging_curator_log_level: ERROR
openshift_logging_curator_cpu_limit: 100m
openshift_logging_curator_memory_limit: null
+openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
+openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' + openshift.common.dns_domain) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: null
openshift_logging_kibana_proxy_debug: false
@@ -28,6 +31,9 @@ openshift_logging_kibana_proxy_memory_limit: null
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
+openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
+
#The absolute path on the control node to the cert file to use
#for the public facing kibana certs
openshift_logging_kibana_cert: ""
@@ -40,7 +46,7 @@ openshift_logging_kibana_key: ""
#for the public facing kibana certs
openshift_logging_kibana_ca: ""
-openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' + openshift.common.dns_domain) }}"
+openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: null
openshift_logging_kibana_ops_proxy_debug: false
@@ -48,12 +54,13 @@ openshift_logging_kibana_ops_proxy_cpu_limit: null
openshift_logging_kibana_ops_proxy_memory_limit: null
openshift_logging_kibana_ops_replica_count: 1
-openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
+openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_use_journal: ''
-openshift_logging_fluentd_journal_read_from_head: ''
+openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
+openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
+openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
openshift_logging_fluentd_hosts: ['--all']
openshift_logging_es_host: logging-es
@@ -63,13 +70,14 @@ openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
openshift_logging_es_cpu_limit: null
-openshift_logging_es_memory_limit: 8Gi
+openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
openshift_logging_es_pv_selector: null
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
openshift_logging_es_recover_after_time: 5m
-openshift_logging_es_storage_group: 65534
+openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}"
# allow cluster-admin or cluster-reader to view operations index
openshift_logging_es_ops_allow_cluster_reader: False
@@ -81,13 +89,18 @@ openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
-openshift_logging_es_ops_memory_limit: 8Gi
+openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
openshift_logging_es_ops_pv_selector: None
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
openshift_logging_es_ops_recover_after_time: 5m
-openshift_logging_es_ops_storage_group: 65534
+openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
+
+# storage related defaults
+openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
+
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#es_logging_contents:
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf
index c0c1c8a44..aeaa705ee 100644
--- a/roles/openshift_logging/files/fluent.conf
+++ b/roles/openshift_logging/files/fluent.conf
@@ -22,7 +22,7 @@
@include configs.d/openshift/filter-k8s-flatten-hash.conf
@include configs.d/openshift/filter-k8s-record-transform.conf
@include configs.d/openshift/filter-syslog-record-transform.conf
- @include configs.d/openshift/filter-common-data-model.conf
+ @include configs.d/openshift/filter-viaq-data-model.conf
@include configs.d/openshift/filter-post-*.conf
##
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 007be3ac0..44b0b2d48 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -5,6 +5,18 @@
import random
+def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
+ '''Return a hash with the desired storage for the given ES instance'''
+ deploy_config = os_logging_facts[root]['deploymentconfigs'].get(dc_name)
+ if deploy_config:
+ storage = deploy_config['volumes']['elasticsearch-storage']
+ if storage.get('hostPath'):
+ return dict(kind='hostpath', path=storage.get('hostPath').get('path'))
+ if len(pvc_claim.strip()) > 0:
+ return dict(kind='pvc', pvc_claim=pvc_claim)
+ return dict(kind='emptydir')
+
+
def random_word(source_alpha, length):
''' Returns a random word given the source of characters to pick from and resulting length '''
return ''.join(random.choice(source_alpha) for i in range(length))
@@ -25,6 +37,14 @@ def entry_from_named_pair(register_pairs, key):
raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
+def map_from_pairs(source, delim="="):
+    ''' Returns a dict built from a comma-separated list of delim-separated pairs '''
+ if source == '':
+ return dict()
+
+    return dict(item.split(delim) for item in source.split(","))
+
+
# pylint: disable=too-few-public-methods
class FilterModule(object):
''' OpenShift Logging Filters '''
@@ -35,4 +55,6 @@ class FilterModule(object):
return {
'random_word': random_word,
'entry_from_named_pair': entry_from_named_pair,
+ 'map_from_pairs': map_from_pairs,
+ 'es_storage': es_storage
}
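
A quick usage check for map_from_pairs (definition copied from the filter plugin; each comma-separated item splits on the delimiter), the conversion the logging defaults rely on for nodeselector strings such as 'logging-infra-fluentd=true':

    def map_from_pairs(source, delim='='):
        if source == '':
            return dict()
        return dict(item.split(delim) for item in source.split(','))

    assert map_from_pairs('logging-infra-fluentd=true') == {'logging-infra-fluentd': 'true'}
    assert map_from_pairs('region=infra,zone=default') == {'region': 'infra', 'zone': 'default'}
    assert map_from_pairs('') == {}
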
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
index bc45dcdab..9c480f73a 100644
--- a/roles/openshift_logging/meta/main.yaml
+++ b/roles/openshift_logging/meta/main.yaml
@@ -14,4 +14,3 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
-- role: openshift_master_facts
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
index d782d621e..fa7a86c27 100644
--- a/roles/openshift_logging/tasks/generate_pvcs.yaml
+++ b/roles/openshift_logging/tasks/generate_pvcs.yaml
@@ -2,28 +2,27 @@
- name: Init pool of PersistentVolumeClaim names
set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}}
vars:
- pvc_name: "{{openshift_logging_es_pvc_prefix}}-{{item| int}}"
- start: "{{es_pvc_names | map('regex_search',openshift_logging_es_pvc_prefix+'.*')|select('string')|list|length}}"
- with_sequence: start={{start}} end={{ (start|int > openshift_logging_es_cluster_size|int - 1) | ternary(start, openshift_logging_es_cluster_size|int - 1)}}
+ pvc_name: "{{es_pvc_prefix}}-{{item| int}}"
+ start: "{{es_pvc_names | map('regex_search', es_pvc_prefix+'.*')|select('string')|list|length}}"
+ with_sequence: start={{start}} end={{ (start|int > es_cluster_size|int - 1) | ternary(start, es_cluster_size|int - 1)}}
when:
- - openshift_logging_es_pvc_size | search('^\d.*')
- - "{{ es_dc_names|default([]) | length < openshift_logging_es_cluster_size|int }}"
+ - "{{ es_dc_names|default([]) | length <= es_cluster_size|int }}"
+ - es_pvc_size | search('^\d.*')
check_mode: no
- name: Generating PersistentVolumeClaims
template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
vars:
obj_name: "{{claim_name}}"
- size: "{{openshift_logging_es_pvc_size}}"
- access_modes:
- - ReadWriteOnce
- pv_selector: "{{openshift_logging_es_pv_selector}}"
+ size: "{{es_pvc_size}}"
+ access_modes: "{{ es_access_modes | list }}"
+ pv_selector: "{{es_pv_selector}}"
with_items:
- "{{es_pvc_pool | default([])}}"
loop_control:
loop_var: claim_name
when:
- - not openshift_logging_es_pvc_dynamic
+ - not es_pvc_dynamic
- es_pvc_pool is defined
check_mode: no
changed_when: no
@@ -34,16 +33,15 @@
obj_name: "{{claim_name}}"
annotations:
volume.alpha.kubernetes.io/storage-class: "dynamic"
- size: "{{openshift_logging_es_pvc_size}}"
- access_modes:
- - ReadWriteOnce
- pv_selector: "{{openshift_logging_es_pv_selector}}"
+ size: "{{es_pvc_size}}"
+ access_modes: "{{ es_access_modes | list }}"
+ pv_selector: "{{es_pv_selector}}"
with_items:
- "{{es_pvc_pool|default([])}}"
loop_control:
loop_var: claim_name
when:
- - openshift_logging_es_pvc_dynamic
+ - es_pvc_dynamic
- es_pvc_pool is defined
check_mode: no
changed_when: no
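
The claim-name pool above continues numbering after whatever claims already exist with the given prefix, up to the cluster size. A Python sketch of the same arithmetic (helper name hypothetical; with_sequence start..end is inclusive, hence the end + 1):

    import re

    def pvc_pool(existing_names, prefix, cluster_size):
        start = len([n for n in existing_names if re.search(prefix + '.*', n)])
        end = max(start, cluster_size - 1)
        return ['{}-{}'.format(prefix, i) for i in range(start, end + 1)]

    assert pvc_pool([], 'logging-es', 3) == ['logging-es-0', 'logging-es-1', 'logging-es-2']
    assert pvc_pool(['logging-es-0'], 'logging-es', 3) == ['logging-es-1', 'logging-es-2']
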
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
index 81fac8b5e..f396bcc6d 100644
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -31,8 +31,6 @@
- fluentd
loop_control:
loop_var: component
- when: secret_name not in openshift_logging_facts.{{component}}.secrets or
- secret_keys | difference(openshift_logging_facts.{{component}}.secrets["{{secret_name}}"]["keys"]) | length != 0
check_mode: no
changed_when: no
@@ -50,8 +48,6 @@
kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
- when: secret_name not in openshift_logging_facts.kibana.secrets or
- secret_keys | difference(openshift_logging_facts.kibana.secrets["{{secret_name}}"]["keys"]) | length != 0
check_mode: no
changed_when: no
@@ -64,10 +60,8 @@
admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
vars:
secret_name: logging-elasticsearch
- secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"]
+ secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key", "searchguard.truststore"]
register: logging_es_secret
- when: secret_name not in openshift_logging_facts.elasticsearch.secrets or
- secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
index 5b474ff39..ab8e207f1 100644
--- a/roles/openshift_logging/tasks/install_curator.yaml
+++ b/roles/openshift_logging/tasks/install_curator.yaml
@@ -31,7 +31,7 @@
curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
replicas: "{{curator_replica_count.stdout | default (0)}}"
- curator_node_selector: "{{openshift_logging_curator_nodeselector | default({}) }}"
+ curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 244949505..1b750bcbe 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -2,12 +2,24 @@
- name: Getting current ES deployment size
set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
+- set_fact: openshift_logging_es_pvc_prefix="logging-es"
+ when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
+
+- set_fact: es_pvc_pool={{[]}}
+
+- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}"
+
- name: Generate PersistentVolumeClaims
include: "{{ role_path}}/tasks/generate_pvcs.yaml"
vars:
- es_pvc_pool: []
+ es_pv_selector: "{{openshift_logging_es_pv_selector}}"
+ es_pvc_dynamic: "{{openshift_logging_es_pvc_dynamic | bool}}"
es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
+ es_pvc_prefix: "{{openshift_logging_es_pvc_prefix}}"
+ es_pvc_size: "{{openshift_logging_es_pvc_size}}"
es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
+ es_cluster_size: "{{openshift_logging_es_cluster_size}}"
+ es_access_modes: "{{ openshift_logging_storage_access_modes }}"
# we should initialize the es_dc_pool with the current keys
- name: Init pool of DeploymentConfig names for Elasticsearch
@@ -37,10 +49,10 @@
es_cluster_name: "{{component}}"
es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
es_memory_limit: "{{openshift_logging_es_memory_limit}}"
- volume_names: "{{es_pvc_pool | default([])}}"
- pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
deploy_name: "{{item.1}}"
- es_node_selector: "{{openshift_logging_es_nodeselector | default({})}}"
+ es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
+ es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}"
with_indexed_items:
- "{{ es_dc_pool }}"
check_mode: no
@@ -51,6 +63,8 @@
- name: Getting current ES deployment size
set_fact: openshift_logging_current_es_ops_size={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length }}
+- set_fact: openshift_logging_es_ops_pvc_prefix="{{ openshift_logging_es_ops_pvc_prefix | default('logging-es-ops') }}"
+
- name: Validate Elasticsearch cluster size for Ops
fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
vars:
@@ -61,17 +75,22 @@
- "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
check_mode: no
+- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
+ when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
+
+- set_fact: es_pvc_pool={{[]}}
+
- name: Generate PersistentVolumeClaims for Ops
include: "{{ role_path}}/tasks/generate_pvcs.yaml"
vars:
- es_pvc_pool: []
es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
- openshift_logging_es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
- openshift_logging_es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- openshift_logging_es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
- openshift_logging_es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic}}"
- openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
+ es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
+ es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
+ es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
+ es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}"
+ es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
+ es_access_modes: "{{ openshift_logging_storage_access_modes }}"
when:
- openshift_logging_use_ops | bool
check_mode: no
@@ -104,8 +123,7 @@
logging_component: elasticsearch
deploy_name_prefix: "logging-{{component}}"
image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- volume_names: "{{es_pvc_pool | default([])}}"
- pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
deploy_name: "{{item.1}}"
es_cluster_name: "{{component}}"
es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
@@ -114,7 +132,8 @@
es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
- es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({})}}"
+ es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}"
with_indexed_items:
- "{{ es_ops_dc_pool | default([]) }}"
when:
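
Applied here per deployment config, es_storage prefers an already-attached hostPath volume, then a non-empty PVC claim, then emptyDir. A usage check with the definition copied from the filter plugin (the facts dict is illustrative):

    def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
        deploy_config = os_logging_facts[root]['deploymentconfigs'].get(dc_name)
        if deploy_config:
            storage = deploy_config['volumes']['elasticsearch-storage']
            if storage.get('hostPath'):
                return dict(kind='hostpath', path=storage.get('hostPath').get('path'))
        if len(pvc_claim.strip()) > 0:
            return dict(kind='pvc', pvc_claim=pvc_claim)
        return dict(kind='emptydir')

    facts = {'elasticsearch': {'deploymentconfigs': {}}}
    assert es_storage(facts, 'logging-es-abc', '') == {'kind': 'emptydir'}
    assert es_storage(facts, 'logging-es-abc', 'logging-es-0-claim') == \
        {'kind': 'pvc', 'pvc_claim': 'logging-es-0-claim'}
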
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
index 3aeff2cac..52bdeb50d 100644
--- a/roles/openshift_logging/tasks/install_kibana.yaml
+++ b/roles/openshift_logging/tasks/install_kibana.yaml
@@ -35,7 +35,7 @@
kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
replicas: "{{kibana_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({}) }}"
+ kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({})}}"
check_mode: no
changed_when: no
@@ -54,7 +54,7 @@
kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({}) }}"
+ kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({})}}"
when: openshift_logging_use_ops | bool
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index e23c3f9f1..83b68fa77 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -76,7 +76,9 @@
register: link_pull_secret
loop_control:
loop_var: sa_account
- when: openshift_logging_image_pull_secret is defined
+ when:
+ - openshift_logging_image_pull_secret is defined
+ - openshift_logging_image_pull_secret != ''
failed_when: link_pull_secret.rc != 0
- name: Scaling up cluster
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2
index 8021a476d..f2d098f10 100644
--- a/roles/openshift_logging/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging/templates/elasticsearch.yml.j2
@@ -8,7 +8,7 @@ script:
index:
number_of_shards: 1
number_of_replicas: 0
- auto_expand_replicas: 0-3
+ auto_expand_replicas: 0-2
unassigned.node_left.delayed_timeout: 2m
translog:
flush_threshold_size: 256mb
diff --git a/roles/openshift_logging/templates/es-storage-emptydir.partial b/roles/openshift_logging/templates/es-storage-emptydir.partial
new file mode 100644
index 000000000..ccd01a816
--- /dev/null
+++ b/roles/openshift_logging/templates/es-storage-emptydir.partial
@@ -0,0 +1 @@
+ emptyDir: {}
diff --git a/roles/openshift_logging/templates/es-storage-hostpath.partial b/roles/openshift_logging/templates/es-storage-hostpath.partial
new file mode 100644
index 000000000..07ddad9ba
--- /dev/null
+++ b/roles/openshift_logging/templates/es-storage-hostpath.partial
@@ -0,0 +1,2 @@
+ hostPath:
+ path: {{es_storage['path']}}
diff --git a/roles/openshift_logging/templates/es-storage-pvc.partial b/roles/openshift_logging/templates/es-storage-pvc.partial
new file mode 100644
index 000000000..fcbff68de
--- /dev/null
+++ b/roles/openshift_logging/templates/es-storage-pvc.partial
@@ -0,0 +1,2 @@
+ persistentVolumeClaim:
+ claimName: {{es_storage['pvc_claim']}}
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index 81ae070be..16185fc1d 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -103,9 +103,4 @@ spec:
configMap:
name: logging-elasticsearch
- name: elasticsearch-storage
-{% if pvc_claim is defined and pvc_claim | trim | length > 0 %}
- persistentVolumeClaim:
- claimName: {{pvc_claim}}
-{% else %}
- emptyDir: {}
-{% endif %}
+{% include 'es-storage-'+ es_storage['kind'] + '.partial' %}
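
With this change es.j2 delegates the volume definition to an `es-storage-<kind>.partial` file, so the template picks emptydir, hostpath, or pvc purely from `es_storage['kind']`. A hedged jinja2 sketch of the same dispatch, with inline templates standing in for the real partial files:

    # Sketch: dispatch to a storage partial by kind, as es.j2 now does with
    # {% include 'es-storage-' + es_storage['kind'] + '.partial' %}.
    # The DictLoader entries are stand-ins for the real partial files.
    from jinja2 import Environment, DictLoader

    env = Environment(loader=DictLoader({
        "es-storage-emptydir.partial": "  emptyDir: {}",
        "es-storage-pvc.partial":
            "  persistentVolumeClaim:\n    claimName: {{ es_storage['pvc_claim'] }}",
        "es.j2":
            "- name: elasticsearch-storage\n"
            "{% include 'es-storage-' + es_storage['kind'] + '.partial' %}",
    }))

    print(env.get_template("es.j2").render(
        es_storage={"kind": "pvc", "pvc_claim": "logging-es-0"}))
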
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
index 223d342b9..0bf1686ad 100644
--- a/roles/openshift_logging/templates/fluentd.j2
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -119,7 +119,7 @@ spec:
- name: "USE_JOURNAL"
value: "{{openshift_logging_fluentd_use_journal|lower}}"
- name: "JOURNAL_SOURCE"
- value: "{{fluentd_journal_source | default('')}}"
+ value: "{{openshift_logging_fluentd_journal_source | default('')}}"
- name: "JOURNAL_READ_FROM_HEAD"
value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
volumes:
diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_logging/templates/pvc.j2
index f19a3a750..07d81afff 100644
--- a/roles/openshift_logging/templates/pvc.j2
+++ b/roles/openshift_logging/templates/pvc.j2
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: {{obj_name}}
+ name: "{{obj_name}}"
labels:
logging-infra: support
{% if annotations is defined %}
diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2
index d73bae9c4..eba4197da 100644
--- a/roles/openshift_logging/templates/secret.j2
+++ b/roles/openshift_logging/templates/secret.j2
@@ -1,9 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
- name: {{secret_name}}
+ name: "{{secret_name}}"
type: Opaque
data:
{% for s in secrets %}
- {{s.key}}: {{s.value | b64encode}}
+ "{{s.key}}" : "{{s.value | b64encode}}"
{% endfor %}
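
Quoting the secret name and keys and base64-encoding each value matches what the Kubernetes Secret API expects in `data`. A small sketch of the encoding step the `b64encode` filter performs (the secret content is an assumed example):

    # Sketch: Secret data values must be base64-encoded strings, which is
    # what the template's `{{ s.value | b64encode }}` filter produces.
    import base64

    secrets = {"kibana.crt": "-----BEGIN CERTIFICATE-----..."}  # assumed example
    data = {key: base64.b64encode(value.encode()).decode()
            for key, value in secrets.items()}
    print(data)
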
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index 9a883feed..f67aee88b 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -39,7 +39,7 @@
delegate_to: "{{ openshift_master_host }}"
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: "{{ 'true' if openshift.node.schedulable | bool else 'false' }}"
retries: 10
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 56af0cf36..18e1b3a54 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -40,5 +40,6 @@ dependencies:
port: 4001/tcp
when: groups.oo_etcd_to_config | default([]) | length == 0
- role: nickhammond.logrotate
-- role: nuage_master
- when: openshift.common.use_nuage | bool
+- role: contiv
+ contiv_role: netmaster
+ when: openshift.common.use_contiv | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 2ef61cddf..98e0da1a2 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -249,7 +249,7 @@
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
command: >
- curl --silent
+ curl --silent --tlsv1.2
{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
{% else %}
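
Pinning curl to `--tlsv1.2` keeps the health check from negotiating older protocols the master may now refuse. A rough Python analogue of the same probe; the URL and CA path are placeholders, not values taken from the role:

    # Sketch: check the master health endpoint over TLS 1.2+, roughly what
    # `curl --silent --tlsv1.2 --cacert ... <api_url>/healthz/ready` does.
    import ssl
    import urllib.request

    api_url = "https://master.example.com:8443/healthz/ready"   # placeholder
    ctx = ssl.create_default_context(cafile="/etc/origin/master/ca-bundle.crt")
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2   # refuse anything older

    with urllib.request.urlopen(api_url, context=ctx) as resp:
        print(resp.read().decode())   # "ok" when the API server is ready
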
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index a0e1ac75e..1b3e0dba1 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,4 +1,8 @@
---
+- name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
- name: Pre-pull master system container image
command: >
atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
@@ -10,68 +14,31 @@
atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master
register: result
-- name: Update Master system container package
- command: >
- atomic containers update {{ openshift.common.service_type }}-master
- register: update_result
- changed_when: "'Extracting' in update_result.stdout"
- when:
- - ("master" in result.stdout)
- - l_is_same_version
- - not l_is_ha
-
-- name: Uninstall Master system container package
- command: >
- atomic uninstall {{ openshift.common.service_type }}-master
- failed_when: False
- when:
- - ("master" in result.stdout)
- - not l_is_same_version
- - not l_is_ha
-
-- name: Install Master system container package
- command: >
- atomic install --system --name={{ openshift.common.service_type }}-master {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
+- name: Install or Update master system container
+ oc_atomic_container:
+ name: "{{ openshift.common.service_type }}-master"
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ state: latest
when:
- - not l_is_same_version or ("master" not in result.stdout) | bool
- not l_is_ha
- notify:
- - restart master
# HA
-- name: Update Master HA system container package
- command: >
- atomic containers update {{ openshift.common.service_type }}-master-{{ item }}
- register: update_result
- changed_when: "'Extracting' in update_result.stdout"
- with_items:
- - api
- - controllers
- when:
- - ("master" in result.stdout)
- - l_is_same_version
- - l_is_ha
-
-- name: Uninstall Master HA system container package
- command: >
- atomic uninstall {{ openshift.common.service_type }}-master-{{ item }}
- failed_when: False
- with_items:
- - api
- - controllers
+- name: Install or Update HA api master system container
+ oc_atomic_container:
+ name: "{{ openshift.common.service_type }}-master-api"
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ state: latest
+ values:
+ - COMMAND=api
when:
- - ("master" in result.stdout)
- - not l_is_same_version
- l_is_ha
-- name: Install Master HA system container package
- command: >
- atomic install --system --set COMMAND={{ item }} --name={{ openshift.common.service_type }}-master-{{ item }} {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
- with_items:
- - api
- - controllers
+- name: Install or Update HA controller master system container
+ oc_atomic_container:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ state: latest
+ values:
+ - COMMAND=controllers
when:
- - not l_is_same_version or ("master" not in result.stdout) | bool
- l_is_ha
- notify:
- - restart master
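
Collapsing the update/uninstall/install triplet into a single `oc_atomic_container` task with `state: latest` makes the play idempotent: the module decides whether to install, update, or do nothing. A hedged sketch of that decision logic (an illustration, not the module's actual implementation):

    # Sketch of the install-or-update decision that `state: latest` implies;
    # illustrative only, not the oc_atomic_container source.
    def ensure_latest(installed, installed_image, wanted_image):
        if not installed:
            return "install"      # atomic install --system ...
        if installed_image != wanted_image:
            return "update"       # atomic containers update ...
        return "unchanged"        # already at the requested image

    print(ensure_latest(True, "registry/ose:v3.5", "registry/ose:v3.6"))  # update
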
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index cf2d2e103..938ac2a12 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -35,6 +35,15 @@ assetConfig:
keyFile: master.server.key
maxRequestsInFlight: 0
requestTimeoutSeconds: 0
+{% if openshift_master_min_tls_version is defined %}
+ minTLSVersion: {{ openshift_master_min_tls_version }}
+{% endif %}
+{% if openshift_master_cipher_suites is defined %}
+ cipherSuites:
+{% for cipher_suite in openshift_master_cipher_suites %}
+ - {{ cipher_suite }}
+{% endfor %}
+{% endif %}
{% if openshift_master_ha | bool %}
{% if openshift.master.audit_config | default(none) is not none and openshift.common.version_gte_3_2_or_1_2 | bool %}
auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }}
@@ -165,7 +174,7 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.sdn_network_plugin_name == 'cni' %}
+{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.use_contiv or openshift.common.sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
@@ -256,5 +265,14 @@ servingInfo:
{% endfor %}
{% endfor %}
{% endif %}
+{% if openshift_master_min_tls_version is defined %}
+ minTLSVersion: {{ openshift_master_min_tls_version }}
+{% endif %}
+{% if openshift_master_cipher_suites is defined %}
+ cipherSuites:
+{% for cipher_suite in openshift_master_cipher_suites %}
+ - {{ cipher_suite }}
+{% endfor %}
+{% endif %}
volumeConfig:
dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
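
The new blocks emit `minTLSVersion` and `cipherSuites` only when the inventory defines them, so existing configs render unchanged. A small render check of that guard pattern under assumed variable names and values:

    # Sketch: the new servingInfo keys appear only when the variables are
    # defined, mirroring the {% if ... is defined %} guards in the template.
    # `min_tls` and `ciphers` are assumed stand-ins for the inventory vars.
    from jinja2 import Template

    block = Template(
        "servingInfo:\n"
        "{% if min_tls is defined %}  minTLSVersion: {{ min_tls }}\n{% endif %}"
        "{% if ciphers is defined %}  cipherSuites:\n"
        "{% for c in ciphers %}  - {{ c }}\n{% endfor %}{% endif %}"
    )
    print(block.render())                      # bare servingInfo: block
    print(block.render(min_tls="VersionTLS12",
                       ciphers=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"]))
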
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index 1e157097d..c5ba20409 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -20,4 +20,3 @@ openshift_master_valid_grant_methods:
- deny
l_is_ha: "{{ openshift.master.ha is defined and openshift.master.ha | bool }}"
-l_is_same_version: "{{ (openshift.common.version is defined) and (openshift.common.version == openshift_version) | bool }}"
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 7a5ed51ec..61541acb8 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -159,7 +159,7 @@
become: no
- name: Lookup default group for ansible_ssh_user
- command: "/usr/bin/id -g {{ ansible_ssh_user }}"
+ command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}"
changed_when: false
register: _ansible_ssh_user_gid
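
Adding `| quote` keeps unexpected characters in `ansible_ssh_user` from being split or interpreted when the command line is built. Ansible's `quote` filter is, as far as the stdlib goes, a wrapper around `shlex.quote`:

    # Sketch: the `| quote` filter neutralizes shell metacharacters in
    # interpolated values, as shlex.quote does in Python.
    import shlex

    user = "bob; rm -rf /"   # deliberately hostile example value
    print("/usr/bin/id -g " + shlex.quote(user))
    # /usr/bin/id -g 'bob; rm -rf /'  -- one argument, nothing executed
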
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index 6d009077a..01806c97f 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -7,12 +7,16 @@ Custom filters for use in openshift-master
import copy
import sys
+# pylint import-error disabled because pylint cannot find the package
+# when installed in a virtualenv
from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
-from six import string_types
+# pylint import-error disabled because pylint cannot find the package
+# when installed in a virtualenv
+from ansible.compat.six import string_types # pylint: disable=no-name-in-module,import-error
import yaml
@@ -527,7 +531,7 @@ class FilterModule(object):
'master.kubelet-client.crt',
'master.kubelet-client.key']
if bool(include_ca):
- certs += ['ca.crt', 'ca.key']
+ certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']
if bool(include_keys):
certs += ['serviceaccounts.private.key',
'serviceaccounts.public.key']
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index ef322bd7d..7f7bc4316 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -40,7 +40,7 @@ class LookupModule(LookupBase):
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
@@ -48,17 +48,17 @@ class LookupModule(LookupBase):
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
- if deployment_type == 'openshift-enterprise':
- # convert short_version to origin short_version
- short_version = re.sub('^3.', '1.', short_version)
+ if deployment_type == 'origin':
+ # convert short_version to enterprise short_version
+ short_version = re.sub('^1.', '3.', short_version)
if short_version == 'latest':
- short_version = '1.6'
+ short_version = '3.6'
# Predicates ordered according to OpenShift Origin source:
# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
- if short_version == '1.1':
+ if short_version == '3.1':
predicates.extend([
{'name': 'PodFitsHostPorts'},
{'name': 'PodFitsResources'},
@@ -66,7 +66,7 @@ class LookupModule(LookupBase):
{'name': 'MatchNodeSelector'},
])
- if short_version == '1.2':
+ if short_version == '3.2':
predicates.extend([
{'name': 'PodFitsHostPorts'},
{'name': 'PodFitsResources'},
@@ -77,7 +77,7 @@ class LookupModule(LookupBase):
{'name': 'MaxGCEPDVolumeCount'}
])
- if short_version == '1.3':
+ if short_version == '3.3':
predicates.extend([
{'name': 'NoDiskConflict'},
{'name': 'NoVolumeZoneConflict'},
@@ -88,7 +88,7 @@ class LookupModule(LookupBase):
{'name': 'CheckNodeMemoryPressure'}
])
- if short_version == '1.4':
+ if short_version == '3.4':
predicates.extend([
{'name': 'NoDiskConflict'},
{'name': 'NoVolumeZoneConflict'},
@@ -101,7 +101,7 @@ class LookupModule(LookupBase):
{'name': 'MatchInterPodAffinity'}
])
- if short_version in ['1.5', '1.6']:
+ if short_version in ['3.5', '3.6']:
predicates.extend([
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
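
The lookup now normalizes toward enterprise-style versions instead of origin-style, so origin `1.x` maps onto `3.x` before the predicate tables are consulted and `latest` resolves to `3.6`. A standalone sketch of that mapping:

    # Sketch: normalize an origin short_version ("1.5") to the enterprise
    # numbering ("3.5") the predicate tables are now keyed by.
    import re

    def normalize(short_version, deployment_type):
        if deployment_type == "origin":
            short_version = re.sub(r"^1\.", "3.", short_version)
        if short_version == "latest":
            short_version = "3.6"
        return short_version

    print(normalize("1.5", "origin"))                    # 3.5
    print(normalize("latest", "openshift-enterprise"))   # 3.6
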
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
index 6ad40e748..66e6ecea3 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -41,7 +41,7 @@ class LookupModule(LookupBase):
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
@@ -49,21 +49,21 @@ class LookupModule(LookupBase):
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
- if deployment_type == 'openshift-enterprise':
+ if deployment_type == 'origin':
# convert short_version to enterprise short_version
- short_version = re.sub('^3.', '1.', short_version)
+ short_version = re.sub('^1.', '3.', short_version)
if short_version == 'latest':
- short_version = '1.6'
+ short_version = '3.6'
- if short_version == '1.1':
+ if short_version == '3.1':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1}
])
- if short_version == '1.2':
+ if short_version == '3.2':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
@@ -71,7 +71,7 @@ class LookupModule(LookupBase):
{'name': 'NodeAffinityPriority', 'weight': 1}
])
- if short_version == '1.3':
+ if short_version == '3.3':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
@@ -80,7 +80,7 @@ class LookupModule(LookupBase):
{'name': 'TaintTolerationPriority', 'weight': 1}
])
- if short_version == '1.4':
+ if short_version == '3.4':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
@@ -91,7 +91,7 @@ class LookupModule(LookupBase):
{'name': 'InterPodAffinityPriority', 'weight': 1}
])
- if short_version in ['1.5', '1.6']:
+ if short_version in ['3.5', '3.6']:
priorities.extend([
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'InterPodAffinityPriority', 'weight': 1},
diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/openshift_master_facts/test/conftest.py
index e67d24f04..140cced73 100644
--- a/roles/openshift_master_facts/test/conftest.py
+++ b/roles/openshift_master_facts/test/conftest.py
@@ -20,7 +20,7 @@ def priorities_lookup():
@pytest.fixture()
-def facts(request):
+def facts():
return {
'openshift': {
'common': {}
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 25294d91a..1fab84c71 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -76,6 +76,7 @@ TEST_VARS = [
('1.5', 'origin', DEFAULT_PREDICATES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
('1.6', 'origin', DEFAULT_PREDICATES_1_5),
+ ('3.6', 'origin', DEFAULT_PREDICATES_1_5),
('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
]
@@ -130,7 +131,9 @@ def short_version_fixture(request, facts):
def test_short_version_kwarg(predicates_lookup, short_version_kwarg_fixture, regions_enabled):
facts, short_version, default_predicates = short_version_kwarg_fixture
- assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled, short_version=short_version)
+ assert_ok(
+ predicates_lookup, default_predicates, variables=facts,
+ regions_enabled=regions_enabled, short_version=short_version)
@pytest.fixture(params=TEST_VARS)
@@ -142,7 +145,9 @@ def short_version_kwarg_fixture(request, facts):
def test_deployment_type_kwarg(predicates_lookup, deployment_type_kwarg_fixture, regions_enabled):
facts, deployment_type, default_predicates = deployment_type_kwarg_fixture
- assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled, deployment_type=deployment_type)
+ assert_ok(
+ predicates_lookup, default_predicates, variables=facts,
+ regions_enabled=regions_enabled, deployment_type=deployment_type)
@pytest.fixture(params=TEST_VARS)
@@ -152,9 +157,12 @@ def deployment_type_kwarg_fixture(request, facts):
return facts, deployment_type, default_predicates
-def test_short_version_deployment_type_kwargs(predicates_lookup, short_version_deployment_type_kwargs_fixture, regions_enabled):
+def test_short_version_deployment_type_kwargs(
+ predicates_lookup, short_version_deployment_type_kwargs_fixture, regions_enabled):
short_version, deployment_type, default_predicates = short_version_deployment_type_kwargs_fixture
- assert_ok(predicates_lookup, default_predicates, regions_enabled=regions_enabled, short_version=short_version, deployment_type=deployment_type)
+ assert_ok(
+ predicates_lookup, default_predicates, regions_enabled=regions_enabled,
+ short_version=short_version, deployment_type=deployment_type)
@pytest.fixture(params=TEST_VARS)
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
index cec44faa4..1098f9391 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
@@ -64,6 +64,7 @@ TEST_VARS = [
('1.5', 'origin', DEFAULT_PRIORITIES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
('1.6', 'origin', DEFAULT_PRIORITIES_1_5),
+ ('3.6', 'origin', DEFAULT_PRIORITIES_1_5),
('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
]
@@ -118,7 +119,9 @@ def short_version_fixture(request, facts):
def test_short_version_kwarg(priorities_lookup, short_version_kwarg_fixture, zones_enabled):
facts, short_version, default_priorities = short_version_kwarg_fixture
- assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled, short_version=short_version)
+ assert_ok(
+ priorities_lookup, default_priorities, variables=facts,
+ zones_enabled=zones_enabled, short_version=short_version)
@pytest.fixture(params=TEST_VARS)
@@ -130,7 +133,9 @@ def short_version_kwarg_fixture(request, facts):
def test_deployment_type_kwarg(priorities_lookup, deployment_type_kwarg_fixture, zones_enabled):
facts, deployment_type, default_priorities = deployment_type_kwarg_fixture
- assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled, deployment_type=deployment_type)
+ assert_ok(
+ priorities_lookup, default_priorities, variables=facts,
+ zones_enabled=zones_enabled, deployment_type=deployment_type)
@pytest.fixture(params=TEST_VARS)
@@ -140,9 +145,12 @@ def deployment_type_kwarg_fixture(request, facts):
return facts, deployment_type, default_priorities
-def test_short_version_deployment_type_kwargs(priorities_lookup, short_version_deployment_type_kwargs_fixture, zones_enabled):
+def test_short_version_deployment_type_kwargs(
+ priorities_lookup, short_version_deployment_type_kwargs_fixture, zones_enabled):
short_version, deployment_type, default_priorities = short_version_deployment_type_kwargs_fixture
- assert_ok(priorities_lookup, default_priorities, zones_enabled=zones_enabled, short_version=short_version, deployment_type=deployment_type)
+ assert_ok(
+ priorities_lookup, default_priorities, zones_enabled=zones_enabled,
+ short_version=short_version, deployment_type=deployment_type)
@pytest.fixture(params=TEST_VARS)
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index edaa7d0df..5921b7bb7 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -16,8 +16,8 @@ openshift_metrics_hawkular_ca: ""
openshift_metrics_hawkular_nodeselector: ""
openshift_metrics_cassandra_replicas: 1
-openshift_metrics_cassandra_storage_type: emptydir
-openshift_metrics_cassandra_pvc_size: 10Gi
+openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
+openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
@@ -46,7 +46,8 @@ openshift_metrics_master_url: https://kubernetes.default.svc.cluster.local
openshift_metrics_node_id: nodename
openshift_metrics_project: openshift-infra
-openshift_metrics_cassandra_pvc_prefix: metrics-cassandra
+openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}"
+openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
openshift_metrics_hawkular_user_write_access: False
diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh
index f4315ef34..c8d5bb3d2 100755
--- a/roles/openshift_metrics/files/import_jks_certs.sh
+++ b/roles/openshift_metrics/files/import_jks_certs.sh
@@ -24,11 +24,10 @@ function import_certs() {
hawkular_cassandra_keystore_password=$(echo $CASSANDRA_KEYSTORE_PASSWD | base64 -d)
hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 -d)
hawkular_cassandra_truststore_password=$(echo $CASSANDRA_TRUSTSTORE_PASSWD | base64 -d)
- hawkular_jgroups_password=$(echo $JGROUPS_PASSWD | base64 -d)
-
+
cassandra_alias=`keytool -noprompt -list -keystore $dir/hawkular-cassandra.truststore -storepass ${hawkular_cassandra_truststore_password} | sed -n '7~2s/,.*$//p'`
hawkular_alias=`keytool -noprompt -list -keystore $dir/hawkular-metrics.truststore -storepass ${hawkular_metrics_truststore_password} | sed -n '7~2s/,.*$//p'`
-
+
if [ ! -f $dir/hawkular-metrics.keystore ]; then
echo "Creating the Hawkular Metrics keystore from the PEM file"
keytool -importkeystore -v \
@@ -50,7 +49,7 @@ function import_certs() {
-srcstorepass $hawkular_cassandra_keystore_password \
-deststorepass $hawkular_cassandra_keystore_password
fi
-
+
if [[ ! ${cassandra_alias[*]} =~ hawkular-metrics ]]; then
echo "Importing the Hawkular Certificate into the Cassandra Truststore"
keytool -noprompt -import -v -trustcacerts -alias hawkular-metrics \
@@ -59,7 +58,7 @@ function import_certs() {
-trustcacerts \
-storepass $hawkular_cassandra_truststore_password
fi
-
+
if [[ ! ${hawkular_alias[*]} =~ hawkular-cassandra ]]; then
echo "Importing the Cassandra Certificate into the Hawkular Truststore"
keytool -noprompt -import -v -trustcacerts -alias hawkular-cassandra \
@@ -101,16 +100,6 @@ function import_certs() {
-storepass $hawkular_metrics_truststore_password
fi
done
-
- if [ ! -f $dir/hawkular-jgroups.keystore ]; then
- echo "Generating the jgroups keystore"
- keytool -genseckey -alias hawkular -keypass ${hawkular_jgroups_password} \
- -storepass ${hawkular_jgroups_password} \
- -keyalg Blowfish \
- -keysize 56 \
- -keystore $dir/hawkular-jgroups.keystore \
- -storetype JCEKS
- fi
}
import_certs
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
new file mode 100644
index 000000000..ffb812271
--- /dev/null
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -0,0 +1,26 @@
+---
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
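
The new handler retries the readiness probe up to 120 times at one-second intervals before declaring the restart failed. A rough Python analogue of that retry loop; `check_ready` is a placeholder for the curl probe shown above:

    # Sketch: retry-until-ok loop behind the handler's
    # `until: api_available_output.stdout == 'ok'` with retries: 120, delay: 1.
    import time

    def wait_for_api(check_ready, retries=120, delay=1):
        for _ in range(retries):
            if check_ready() == "ok":
                return True
            time.sleep(delay)
        return False

    print(wait_for_api(lambda: "ok"))   # True on the first attempt
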
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 9e7140bfa..61a240a33 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -24,7 +24,6 @@
with_items:
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
- - hawkular-jgroups-keystore.pwd
changed_when: no
- set_fact:
@@ -32,11 +31,10 @@
with_items: "{{pwd_file_stat.results}}"
changed_when: no
-- name: generate password for hawkular metrics and jgroups
+- name: generate password for hawkular metrics
local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
- hawkular-metrics
- - hawkular-jgroups-keystore
- name: generate htpasswd file for hawkular metrics
local_action: >
@@ -51,7 +49,6 @@
with_items:
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
- - hawkular-jgroups-keystore.pwd
- include: import_jks_certs.yaml
@@ -69,8 +66,6 @@
- hawkular-metrics-truststore.pwd
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
- - hawkular-jgroups.keystore
- - hawkular-jgroups-keystore.pwd
- hawkular-cassandra.crt
- hawkular-cassandra.pem
- hawkular-cassandra.keystore
@@ -104,11 +99,6 @@
hawkular-metrics.keystore.alias: "{{ 'hawkular-metrics'|b64encode }}"
hawkular-metrics.htpasswd.file: >
{{ hawkular_secrets['hawkular-metrics.htpasswd'] }}
- hawkular-metrics.jgroups.keystore: >
- {{ hawkular_secrets['hawkular-jgroups.keystore'] }}
- hawkular-metrics.jgroups.keystore.password: >
- {{ hawkular_secrets['hawkular-jgroups-keystore.pwd'] }}
- hawkular-metrics.jgroups.alias: "{{ 'hawkular'|b64encode }}"
when: name not in metrics_secrets.stdout_lines
changed_when: no
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
index 57ec70c79..2a67dad0e 100644
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml
@@ -15,10 +15,6 @@
register: metrics_truststore
check_mode: no
-- stat: path="{{mktemp.stdout}}/hawkular-jgroups.keystore"
- register: jgroups_keystore
- check_mode: no
-
- block:
- slurp: src={{ mktemp.stdout }}/hawkular-metrics-keystore.pwd
register: metrics_keystore_password
@@ -26,9 +22,6 @@
- slurp: src={{ mktemp.stdout }}/hawkular-cassandra-keystore.pwd
register: cassandra_keystore_password
- - slurp: src={{ mktemp.stdout }}/hawkular-jgroups-keystore.pwd
- register: jgroups_keystore_password
-
- fetch:
dest: "{{local_tmp.stdout}}/"
src: "{{ mktemp.stdout }}/{{item}}"
@@ -48,7 +41,6 @@
CASSANDRA_KEYSTORE_PASSWD: "{{cassandra_keystore_password.content}}"
METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}"
CASSANDRA_TRUSTSTORE_PASSWD: "{{cassandra_truststore_password.content}}"
- JGROUPS_PASSWD: "{{jgroups_keystore_password.content}}"
changed_when: False
- copy:
@@ -59,5 +51,4 @@
when: not metrics_keystore.stat.exists or
not metrics_truststore.stat.exists or
not cassandra_keystore.stat.exists or
- not cassandra_truststore.stat.exists or
- not jgroups_keystore.stat.exists
+ not cassandra_truststore.stat.exists
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index df39c1e1f..a467c1a51 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -22,6 +22,9 @@
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
changed_when: false
+- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
+ when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
+
- name: generate hawkular-cassandra persistent volume claims
template:
src: pvc.j2
@@ -30,11 +33,12 @@
obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
labels:
metrics-infra: hawkular-cassandra
- access_modes:
- - ReadWriteOnce
+ access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
- when: openshift_metrics_cassandra_storage_type == 'pv'
+ when:
+ - openshift_metrics_cassandra_storage_type != 'emptydir'
+ - openshift_metrics_cassandra_storage_type != 'dynamic'
changed_when: false
- name: generate hawkular-cassandra persistent volume claims (dynamic)
@@ -47,8 +51,7 @@
metrics-infra: hawkular-cassandra
annotations:
volume.alpha.kubernetes.io/storage-class: dynamic
- access_modes:
- - ReadWriteOnce
+ access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
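
The plain PVC task now runs for every storage type except `emptydir` and `dynamic` (dynamic keeps its own annotated task), which is what lets the new `nfs` type reuse the ordinary PVC path. A small truth-table check of the branching:

    # Sketch: which task generates the Cassandra PVC for each storage type
    # after this change; 'nfs' now takes the plain-PVC branch.
    for storage_type in ("emptydir", "pv", "dynamic", "nfs"):
        plain_pvc = storage_type not in ("emptydir", "dynamic")
        dynamic_pvc = storage_type == "dynamic"
        print(storage_type,
              "plain_pvc" if plain_pvc else ("dynamic_pvc" if dynamic_pvc else "no_pvc"))
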
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 66a3abdbd..ffe6f63a2 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -34,6 +34,8 @@
file_content: "{{ item.content | b64decode | from_yaml }}"
with_items: "{{ object_defs.results }}"
+- include: update_master_config.yaml
+
- command: >
{{openshift.common.client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml
new file mode 100644
index 000000000..20fc45fd4
--- /dev/null
+++ b/roles/openshift_metrics/tasks/update_master_config.yaml
@@ -0,0 +1,9 @@
+---
+- name: Adding metrics route information to metricsPublicURL
+ modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: assetConfig.metricsPublicURL
+ yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
+ notify: restart master
+ tags:
+ - update_master_config
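
`modify_yaml` rewrites a single dotted key in master-config.yaml in place and leaves the rest of the document alone. A hedged sketch of the same nested update with PyYAML (illustrative only, not the module's code):

    # Sketch: set a dotted key like assetConfig.metricsPublicURL in a YAML
    # document, roughly what the modify_yaml module does.
    import yaml

    def set_key(doc, dotted_key, value):
        node = doc
        *parents, leaf = dotted_key.split(".")
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value

    config = yaml.safe_load("assetConfig:\n  logoutURL: ''\n")
    set_key(config, "assetConfig.metricsPublicURL",
            "https://hawkular-metrics.example.com/hawkular/metrics")  # placeholder host
    print(yaml.safe_dump(config, default_flow_style=False))
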
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index d39f1b43a..361378df3 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -58,9 +58,6 @@ spec:
- "--hmw.truststore=/secrets/hawkular-metrics.truststore"
- "--hmw.keystore_password_file=/secrets/hawkular-metrics.keystore.password"
- "--hmw.truststore_password_file=/secrets/hawkular-metrics.truststore.password"
- - "--hmw.jgroups_keystore=/secrets/hawkular-metrics.jgroups.keystore"
- - "--hmw.jgroups_keystore_password_file=/secrets/hawkular-metrics.jgroups.keystore.password"
- - "--hmw.jgroups_alias_file=/secrets/hawkular-metrics.jgroups.alias"
env:
- name: POD_NAMESPACE
valueFrom:
@@ -68,6 +65,8 @@ spec:
fieldPath: metadata.namespace
- name: MASTER_URL
value: "{{ openshift_metrics_master_url }}"
+ - name: JGROUPS_PASSWORD
+ value: "{{ 17 | oo_random_word }}"
- name: OPENSHIFT_KUBE_PING_NAMESPACE
valueFrom:
fieldRef:
@@ -81,10 +80,10 @@ spec:
mountPath: "/secrets"
- name: hawkular-metrics-client-secrets
mountPath: "/client-secrets"
-{% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none)
+{% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none)
or (openshift_metrics_hawkular_limits_memory is defined and openshift_metrics_hawkular_limits_memory is not none)
or (openshift_metrics_hawkular_requests_cpu is defined and openshift_metrics_hawkular_requests_cpu is not none)
- or (openshift_metrics_hawkular_requests_memory is defined and openshift_metrics_hawkular_requests_memory is not none))
+ or (openshift_metrics_hawkular_requests_memory is defined and openshift_metrics_hawkular_requests_memory is not none))
%}
resources:
{% if (openshift_metrics_hawkular_limits_cpu is not none
@@ -98,8 +97,8 @@ spec:
memory: "{{openshift_metrics_hawkular_limits_memory}}"
{% endif %}
{% endif %}
-{% if (openshift_metrics_hawkular_requests_cpu is not none
- or openshift_metrics_hawkular_requests_memory is not none)
+{% if (openshift_metrics_hawkular_requests_cpu is not none
+ or openshift_metrics_hawkular_requests_memory is not none)
%}
requests:
{% if openshift_metrics_hawkular_requests_cpu is not none %}
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index 8fbfa8b5d..885dd368d 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: {{obj_name}}
+ name: "{{obj_name}}"
{% if labels is not defined %}
labels:
logging-infra: support
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
index 4a3724e3f..47aa76dd2 100644
--- a/roles/openshift_metrics/vars/main.yaml
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -8,3 +8,4 @@ openshift_metrics_cassandra_storage_types:
- emptydir
- pv
- dynamic
+- nfs
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 691227915..626248306 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -153,7 +153,7 @@
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
command: >
- curl --silent --cacert {{ openshift.common.config_base }}/node/ca.crt
+ curl --silent --tlsv1.2 --cacert {{ openshift.common.config_base }}/node/ca.crt
{{ openshift_node_master_api_url }}/healthz/ready
args:
# Disables the following warning:
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 3ae5c7600..abe139418 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,36 +1,16 @@
---
+- name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
- name: Pre-pull node system container image
command: >
atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
-- name: Check Node system container package
- command: >
- atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-node
- register: result
-
-- name: Update Node system container package
- command: >
- atomic containers update {{ openshift.common.service_type }}-node
- register: update_result
- changed_when: "'Extracting' in update_result.stdout"
- when:
- - l_is_same_version
- - ("node" in result.stdout)
-
-- name: Uninstall Node system container package
- command: >
- atomic uninstall {{ openshift.common.service_type }}-node
- failed_when: False
- when:
- - not l_is_same_version
- - ("node" in result.stdout)
-
-- name: Install Node system container package
- command: >
- atomic install --system --name={{ openshift.common.service_type }}-node {{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
- register: install_node_result
- changed_when: "'Extracting' in pull_result.stdout"
- when:
- - not l_is_same_version or ("node" not in result.stdout) | bool
+- name: Install or Update node system container
+ oc_atomic_container:
+ name: "{{ openshift.common.service_type }}-node"
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ state: latest
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index 6114230d0..b76ce8797 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -1,36 +1,16 @@
---
+- name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
-- name: Check OpenvSwitch system container package
- command: >
- atomic containers list --no-trunc -a -f container=openvswitch
- register: result
-
-- name: Update OpenvSwitch system container package
- command: >
- atomic containers update openvswitch
- register: update_result
- changed_when: "'Extracting' in update_result.stdout"
- when:
- - l_is_same_version
- - ("openvswitch" in result.stdout) | bool
-
-- name: Uninstall OpenvSwitch system container package
- command: >
- atomic uninstall openvswitch
- failed_when: False
- when:
- - not l_is_same_version
- - ("openvswitch" in result.stdout) | bool
-
-- name: Install OpenvSwitch system container package
- command: >
- atomic install --system --name=openvswitch {{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
- when:
- - not l_is_same_version or ("openvswitch" not in result.stdout) | bool
- notify:
- - restart docker
+- name: Install or Update OpenVSwitch system container
+ oc_atomic_container:
+ name: openvswitch
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
+ state: latest
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 55ae4bf54..f2f929232 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -27,7 +27,7 @@ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.sdn_network_plugin_name == 'cni' %}
+{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.use_contiv | bool or openshift.common.sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
@@ -40,6 +40,15 @@ servingInfo:
certFile: server.crt
clientCA: ca.crt
keyFile: server.key
+{% if openshift_node_min_tls_version is defined %}
+ minTLSVersion: {{ openshift_node_min_tls_version }}
+{% endif %}
+{% if openshift_node_cipher_suites is defined %}
+ cipherSuites:
+{% for cipher_suite in openshift_node_cipher_suites %}
+ - {{ cipher_suite }}
+{% endfor %}
+{% endif %}
volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
proxyArguments:
proxy-mode:
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index e33d5d497..b4fd5aeb0 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -6,6 +6,8 @@ PartOf=docker.service
Requires=docker.service
{% if openshift.common.use_openshift_sdn %}
Requires=openvswitch.service
+After=ovsdb-server.service
+After=ovs-vswitchd.service
{% endif %}
Wants={{ openshift.common.service_type }}-master.service
Requires={{ openshift.common.service_type }}-node-dep.service
@@ -15,7 +17,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
deleted file mode 100644
index 0c2abf3b9..000000000
--- a/roles/openshift_node/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-l_is_same_version: "{{ (openshift.common.version is defined) and (openshift.common.version == openshift_version) | bool }}"
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index e33d5d497..6ec88f85e 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo b/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
index 1af0bd023..124bff09d 100644
--- a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
+++ b/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
@@ -5,13 +5,6 @@ enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS
-[centos-openshift-origin-common]
-name=CentOS OpenShift Origin Common
-baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin/common/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS
-
[centos-openshift-origin-testing]
name=CentOS OpenShift Origin Testing
baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin/
diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
deleted file mode 100644
index b8cbe9a84..000000000
--- a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-####
-#
-# OSE 3.0.z did not have 'oadm policy add-scc-to-user'.
-#
-####
-
-- name: tmp dir for openshift
- file:
- path: /tmp/openshift
- state: directory
- owner: root
- mode: 0700
-
-- name: Create service account configs
- template:
- src: serviceaccount.j2
- dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml"
- with_items: '{{ openshift_serviceaccounts_names }}'
-
-- name: Get current security context constraints
- shell: >
- {{ openshift.common.client_binary }} get scc privileged -o yaml
- --output-version=v1 > /tmp/openshift/scc.yaml
- changed_when: false
-
-- name: Add security context constraint for {{ item }}
- lineinfile:
- dest: /tmp/openshift/scc.yaml
- line: "- system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}"
- insertafter: "^users:$"
- when: "item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}"
- with_nested:
- - '{{ openshift_serviceaccounts_names }}'
- - '{{ scc_test.results }}'
-
-- name: Apply new scc rules for service accounts
- command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1"
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
deleted file mode 100644
index 1d570fa5b..000000000
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: create the service account
- oc_serviceaccount:
- name: "{{ item }}"
- namespace: "{{ openshift_serviceaccounts_namespace }}"
- state: present
- with_items:
- - "{{ openshift_serviceaccounts_names }}"
-
-- name: test if scc needs to be updated
- command: >
- {{ openshift.common.client_binary }} get scc {{ item }} -o yaml
- changed_when: false
- failed_when: false
- register: scc_test
- with_items: "{{ openshift_serviceaccounts_sccs }}"
-
-- name: Grant the user access to the appropriate scc
- command: >
- {{ openshift.common.client_binary }} adm policy add-scc-to-user
- {{ item.1.item }} system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}
- when: "openshift.common.version_gte_3_1_or_1_1 and item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users | default([]) }}"
- with_nested:
- - "{{ openshift_serviceaccounts_names }}"
- - "{{ scc_test.results }}"
-
-- include: legacy_add_scc_to_user.yml
- when: not openshift.common.version_gte_3_1_or_1_1
diff --git a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 b/roles/openshift_serviceaccounts/templates/serviceaccount.j2
deleted file mode 100644
index c5f12421f..000000000
--- a/roles/openshift_serviceaccounts/templates/serviceaccount.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ item.0 }}
diff --git a/setup.cfg b/setup.cfg
index e6bf2c5d1..f808fec5a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,30 +6,3 @@ universal=1
[yamllint]
excludes=.tox,utils,files
-
-[lint]
-lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
-
-[flake8]
-exclude=.tox/*,utils/*,inventory/*
-max_line_length = 120
-ignore = E501,T003
-
-[tool:pytest]
-norecursedirs =
- .*
- __pycache__
- cover
- docs
- # utils have its own config
- utils
-python_files =
- # TODO(rhcarvalho): rename test files to follow a single pattern. "test*.py"
- # is Python unittest's default, while pytest discovers both "test_*.py" and
- # "*_test.py" by default.
- test_*.py
- *_tests.py
-addopts =
- --cov=.
- --cov-report=term
- --cov-report=html
diff --git a/test-requirements.txt b/test-requirements.txt
index 9bb6e058c..805828e1c 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,11 +1,9 @@
-six
-pyOpenSSL
+# flake8 must be listed before pylint to avoid dependency conflicts
flake8
flake8-mutable
flake8-print
pylint
setuptools-lint
-PyYAML
yamllint
coverage
mock
diff --git a/tox.ini b/tox.ini
index 13c87f5c4..643fa774d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,21 +1,24 @@
[tox]
minversion=2.3.1
envlist =
- py{27,35}-ansible22-{pylint,unit,flake8,yamllint,generate_validation}
+ py{27,35}-{flake8,pylint,unit}
+ py27-{yamllint,ansible_syntax,generate_validation}
skipsdist=True
skip_missing_interpreters=True
[testenv]
+skip_install=True
deps =
+ -rrequirements.txt
-rtest-requirements.txt
py35-flake8: flake8-bugbear
- ansible22: ansible~=2.2
commands =
+ unit: pip install -e utils
unit: pytest {posargs}
flake8: flake8 {posargs}
pylint: python setup.py lint
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
-
-
+ # TODO(rhcarvalho): check syntax of other important entrypoint playbooks
+ ansible_syntax: ansible-playbook --syntax-check playbooks/byo/config.yml
diff --git a/utils/.coveragerc b/utils/.coveragerc
deleted file mode 100644
index 551e13192..000000000
--- a/utils/.coveragerc
+++ /dev/null
@@ -1,18 +0,0 @@
-[run]
-branch = True
-omit =
- */lib/python*/site-packages/*
- */lib/python*/*
- /usr/*
- setup.py
- # TODO(rhcarvalho): this is used to ignore test files from coverage report.
- # We can make this less generic when we stick with a single test pattern in
- # the repo.
- test_*.py
- *_tests.py
-
-[report]
-fail_under = 73
-
-[html]
-directory = cover
diff --git a/utils/.pylintrc b/utils/.pylintrc
deleted file mode 120000
index 30b33b524..000000000
--- a/utils/.pylintrc
+++ /dev/null
@@ -1 +0,0 @@
-../.pylintrc \ No newline at end of file
diff --git a/utils/Makefile b/utils/Makefile
deleted file mode 100644
index 038c31fcf..000000000
--- a/utils/Makefile
+++ /dev/null
@@ -1,110 +0,0 @@
-########################################################
-
-# Makefile for OpenShift: Atomic Quick Installer
-#
-# useful targets (not all implemented yet!):
-# make clean -- Clean up garbage
-# make ci ------------------- Execute CI steps (for travis or jenkins)
-
-########################################################
-
-# > VARIABLE = value
-#
-# Normal setting of a variable - values within it are recursively
-# expanded when the variable is USED, not when it's declared.
-#
-# > VARIABLE := value
-#
-# Setting of a variable with simple expansion of the values inside -
-# values within it are expanded at DECLARATION time.
-
-########################################################
-
-
-NAME := oo-install
-VENV := $(NAME)env
-TESTPACKAGE := oo-install
-SHORTNAME := ooinstall
-
-# This doesn't evaluate until it's called. The -D argument is the
-# directory of the target file ($@), kinda like `dirname`.
-ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
-MANPAGES := docs/man/man1/atomic-openshift-installer.1
-# slipped into the manpage template before a2x processing
-VERSION := 1.4
-
-# YAMLFILES: Skipping all '/files/' folders due to conflicting yaml file definitions
-YAMLFILES = $(shell find ../ -name $(VENV) -prune -o -name .tox -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" -print 2>&1)
-PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name .tox -prune -o -name "*.py" -print)
-
-sdist: clean
- python setup.py sdist
- rm -fR $(SHORTNAME).egg-info
-
-clean:
- @find . -type f -regex ".*\.py[co]$$" -delete
- @find . -type f \( -name "*~" -or -name "#*" \) -delete
- @rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
- @rm -fR $(VENV)
- @rm -fR .tox
-
-# To force a rebuild of the docs run 'touch' on any *.in file under
-# docs/man/man1/
-docs: $(MANPAGES)
-
-# Regenerate %.1.asciidoc if %.1.asciidoc.in has been modified more
-# recently than %.1.asciidoc.
-%.1.asciidoc: %.1.asciidoc.in
- sed "s/%VERSION%/$(VERSION)/" $< > $@
-
-# Regenerate %.1 if %.1.asciidoc or VERSION has been modified more
-# recently than %.1. (Implicitly runs the %.1.asciidoc recipe)
-%.1: %.1.asciidoc
- $(ASCII2MAN)
-
-viewcover:
- xdg-open cover/index.html
-
-# Conditional virtualenv building strategy taken from this great post
-# by Marcel Hellkamp:
-# http://blog.bottlepy.org/2012/07/16/virtualenv-and-makefiles.html
-$(VENV): $(VENV)/bin/activate
-$(VENV)/bin/activate: test-requirements.txt
- @echo "#############################################"
- @echo "# Creating a virtualenv"
- @echo "#############################################"
- test -d $(VENV) || virtualenv $(VENV)
- . $(VENV)/bin/activate && pip install setuptools==17.1.1
- . $(VENV)/bin/activate && pip install -r test-requirements.txt
- touch $(VENV)/bin/activate
-# If there are any special things to install do it here
-# . $(VENV)/bin/activate && INSTALL STUFF
-
-ci-unittests: $(VENV)
- @echo "#############################################"
- @echo "# Running Unit Tests in virtualenv"
- @echo "#############################################"
- . $(VENV)/bin/activate && detox -e py27-unit,py35-unit
- @echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
-
-ci-pylint: $(VENV)
- @echo "#############################################"
- @echo "# Running PyLint Tests in virtualenv"
- @echo "#############################################"
- . $(VENV)/bin/activate && detox -e py27-pylint,py35-pylint
-
-ci-flake8: $(VENV)
- @echo "#############################################"
- @echo "# Running Flake8 Compliance Tests in virtualenv"
- @echo "#############################################"
- . $(VENV)/bin/activate && detox -e py27-flake8,py35-flake8
-
-ci-tox: $(VENV)
- . $(VENV)/bin/activate && detox
-
-ci: ci-tox
- @echo
- @echo "##################################################################################"
- @echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
- @echo "To clean your test environment run 'make clean'"
- @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8'"
diff --git a/utils/README.md b/utils/README.md
index 7aa045ae4..79ea3fa9f 100644
--- a/utils/README.md
+++ b/utils/README.md
@@ -1,69 +1,14 @@
# Running Tests
-Run the command:
-
- make ci
-
-to run tests and linting tools.
-
-Underneath the covers, we use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
-tests. Alternatively, tests can be run using [detox](https://pypi.python.org/pypi/detox/) which allows
-for running tests in parallel.
-
-```
-pip install tox detox
-```
-
-List the test environments available:
-
-```
-tox -l
-```
-
-Run all of the tests with:
-
-```
-tox
-```
-
-Run all of the tests in parallel with detox:
-
-```
-detox
-```
-
-Run a particular test environment:
-
-```
-tox -e py27-flake8
-```
-
-Run a particular test environment in a clean virtualenv:
-
-```
-tox -r -e py35-pylint
-```
-
-If you want to enter the virtualenv created by tox to do additional
-testing/debugging:
-
-```
-source .tox/py27-flake8/bin/activate
-```
-
-You will get errors if the log files already exist and can not be
-written to by the current user (`/tmp/ansible.log` and
-`/tmp/installer.txt`). *We're working on it.*
-
+All tests can be run with `tox`. See [running tests](../CONTRIBUTING.md#running-tests) for more information.
# Running From Source
You will need to set up a **virtualenv** to run from source:
$ virtualenv oo-install
- $ source ./oo-install/bin/activate
- $ virtualenv --relocatable ./oo-install/
- $ python setup.py install
+ $ source oo-install/bin/activate
+ $ python setup.py develop
The virtualenv `bin` directory should now be at the start of your
`$PATH`, and `oo-install` is ready to use from your shell.
diff --git a/utils/setup.cfg b/utils/setup.cfg
index d730cd3b4..79bc67848 100644
--- a/utils/setup.cfg
+++ b/utils/setup.cfg
@@ -3,30 +3,3 @@
# 3. If at all possible, it is good practice to do this. If you cannot, you
# will need to generate wheels for each Python version that you support.
universal=1
-
-[aliases]
-test=pytest
-
-[flake8]
-max-line-length=120
-exclude=test/*,setup.py,oo-installenv
-ignore=E501
-
-[lint]
-lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
-
-[tool:pytest]
-testpaths = test
-norecursedirs =
- .*
- __pycache__
-python_files =
- # TODO(rhcarvalho): rename test files to follow a single pattern. "test*.py"
- # is Python unittest's default, while pytest discovers both "test_*.py" and
- # "*_test.py" by default.
- test_*.py
- *_tests.py
-addopts =
- --cov=.
- --cov-report=term
- --cov-report=html
diff --git a/utils/setup.py b/utils/setup.py
index 629d39206..6fec7b173 100644
--- a/utils/setup.py
+++ b/utils/setup.py
@@ -38,26 +38,15 @@ setup(
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
- #packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['ooinstall'],
package_dir={'': 'src'},
-
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['click', 'PyYAML', 'ansible'],
- # List additional groups of dependencies here (e.g. development
- # dependencies). You can install these using the following syntax,
- # for example:
- # $ pip install -e .[dev,test]
- #extras_require={
- # 'dev': ['check-manifest'],
- # 'test': ['coverage'],
- #},
-
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
index e51890a22..c881e4b92 100644
--- a/utils/src/ooinstall/ansible_plugins/facts_callback.py
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -5,6 +5,7 @@
import os
import yaml
from ansible.plugins.callback import CallbackBase
+from ansible.parsing.yaml.dumper import AnsibleDumper
# pylint: disable=super-init-not-called
@@ -38,7 +39,11 @@ class CallbackModule(CallbackBase):
facts = abridged_result['result']['ansible_facts']['openshift']
hosts_yaml = {}
hosts_yaml[res._host.get_name()] = facts
- os.write(self.hosts_yaml, yaml.safe_dump(hosts_yaml))
+ to_dump = yaml.dump(hosts_yaml,
+ allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper)
+ os.write(self.hosts_yaml, to_dump)
def v2_runner_on_skipped(self, res):
pass
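Note on the change above: `yaml.safe_dump` only knows how to serialize built-in Python types, while gathered facts can contain Ansible's own text wrappers (subclasses of `str` such as `AnsibleUnicode`), which make the safe dumper raise `RepresenterError`. Passing a dumper that knows those types, as `AnsibleDumper` does, avoids this. A minimal sketch of the failure mode and the fix, using a hypothetical `WrappedText` class to stand in for Ansible's type:

```python
import yaml
from yaml.representer import RepresenterError, SafeRepresenter


class WrappedText(str):
    """Stand-in for Ansible's AnsibleUnicode subclass of str."""


facts = {'hostname': WrappedText('master1.example.com')}

try:
    yaml.safe_dump(facts)
except RepresenterError as err:
    # safe_dump rejects str subclasses it has no representer for
    print("safe_dump failed: %s" % err)


class PermissiveDumper(yaml.SafeDumper):
    """Like AnsibleDumper: a SafeDumper taught about the extra types."""


PermissiveDumper.add_representer(WrappedText, SafeRepresenter.represent_str)
print(yaml.dump(facts, Dumper=PermissiveDumper, default_flow_style=False))
```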
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
deleted file mode 100644
index b26e22a7e..000000000
--- a/utils/test-requirements.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-ansible
-# flake8 moved to before setuptools-lint to satisfy mccabe dependency issue
-flake8
-setuptools-lint
-coverage
-mock
-PyYAML
-click
-backports.functools_lru_cache
-pyOpenSSL
-yamllint
-tox
-detox
-pytest
-pytest-cov
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 0cb37eaff..673997c42 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -409,8 +409,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
if result.exception is None or result.exit_code != 1:
- print("Exit code: %s" % result.exit_code)
- self.fail("Unexpected CLI return")
+ self.fail("Unexpected CLI return. Exit code: %s" % result.exit_code)
# unattended with config file and all installed hosts (with --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -600,97 +599,6 @@ class UnattendedCliTests(OOCliFixture):
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
- # 2016-09-26 - tbielawa - COMMENTING OUT these tests FOR NOW while
- # we wait to see if anyone notices that we took away their ability
- # to set the ansible_config parameter in the command line options
- # and in the installer config file.
- #
- # We have removed the ability to set the ansible config file
- # manually so that our new quieter output mode is the default and
- # only output mode.
- #
- # RE: https://trello.com/c/DSwwizwP - atomic-openshift-install
- # should only output relevant information.
-
- # @patch('ooinstall.openshift_ansible.run_ansible')
- # @patch('ooinstall.openshift_ansible.load_system_facts')
- # def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
- # load_facts_mock.return_value = (MOCK_FACTS, 0)
- # run_ansible_mock.return_value = 0
-
- # config = SAMPLE_CONFIG % 'openshift-enterprise'
-
- # self._ansible_config_test(load_facts_mock, run_ansible_mock,
- # config, None, None)
-
- # @patch('ooinstall.openshift_ansible.run_ansible')
- # @patch('ooinstall.openshift_ansible.load_system_facts')
- # def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
- # load_facts_mock.return_value = (MOCK_FACTS, 0)
- # run_ansible_mock.return_value = 0
-
- # config = SAMPLE_CONFIG % 'openshift-enterprise'
- # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
-
- # self._ansible_config_test(load_facts_mock, run_ansible_mock,
- # config, ansible_config, ansible_config)
-
- # @patch('ooinstall.openshift_ansible.run_ansible')
- # @patch('ooinstall.openshift_ansible.load_system_facts')
- # def test_ansible_config_specified_in_installer_config(self,
- # load_facts_mock, run_ansible_mock):
-
- # load_facts_mock.return_value = (MOCK_FACTS, 0)
- # run_ansible_mock.return_value = 0
-
- # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
- # config = SAMPLE_CONFIG % 'openshift-enterprise'
- # config = "%s\nansible_config: %s" % (config, ansible_config)
- # self._ansible_config_test(load_facts_mock, run_ansible_mock,
- # config, None, ansible_config)
-
- # #pylint: disable=too-many-arguments
- # # This method allows for drastically simpler tests to write, and the args
- # # are all useful.
- # def _ansible_config_test(self, load_facts_mock, run_ansible_mock,
- # installer_config, ansible_config_cli=None, expected_result=None):
- # """
- # Utility method for testing the ways you can specify the ansible config.
- # """
-
- # load_facts_mock.return_value = (MOCK_FACTS, 0)
- # run_ansible_mock.return_value = 0
-
- # config_file = self.write_config(os.path.join(self.work_dir,
- # 'ooinstall.conf'), installer_config)
-
- # self.cli_args.extend(["-c", config_file])
- # if ansible_config_cli:
- # self.cli_args.extend(["--ansible-config", ansible_config_cli])
- # self.cli_args.append("install")
- # result = self.runner.invoke(cli.cli, self.cli_args)
- # self.assert_result(result, 0)
-
- # # Test the env vars for facts playbook:
- # facts_env_vars = load_facts_mock.call_args[0][2]
- # if expected_result:
- # self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
- # else:
- # # If user running test has rpm installed, this might be set to default:
- # self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or
- # facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
-
- # # Test the env vars for main playbook:
- # env_vars = run_ansible_mock.call_args[0][2]
- # if expected_result:
- # self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
- # else:
- # # If user running test has rpm installed, this might be set to default:
- # #
- # # By default we will use the quiet config
- # self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
- # env_vars['ANSIBLE_CONFIG'] == cli.QUIET_ANSIBLE_CONFIG)
-
# unattended with bad config file and no installed hosts (without --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
@@ -1011,13 +919,7 @@ class AttendedCliTests(OOCliFixture):
full_line = "%s=%s" % (a, b)
tokens = full_line.split()
if tokens[0] == host:
- found = False
- for token in tokens:
- if token == variable:
- found = True
- continue
- self.assertTrue("Unable to find %s in line: %s" %
- (variable, full_line), found)
+ self.assertTrue(variable in tokens[1:], "Unable to find %s in line: %s" % (variable, full_line))
return
self.fail("unable to find host %s in inventory" % host)
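For context on the rewritten membership check above: each generated inventory line starts with the host name and is followed by whitespace-separated `key=value` variables, so `tokens[1:]` is exactly the set of variable assignments on that line. An illustrative sketch with made-up values:

```python
# Hypothetical inventory line; names and values are made up.
full_line = "node1.example.com openshift_ip=10.0.0.5 openshift_schedulable=False"
tokens = full_line.split()

host = tokens[0]        # first token is the host name
variables = tokens[1:]  # remaining tokens are key=value assignments

assert host == "node1.example.com"
assert "openshift_schedulable=False" in variables  # what the assertTrue checks
```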
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 5200d275d..873ac4a27 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -65,14 +65,13 @@ class OOCliFixture(OOInstallFixture):
def assert_result(self, result, exit_code):
if result.exit_code != exit_code:
- print("Unexpected result from CLI execution")
- print("Exit code: %s" % result.exit_code)
- print("Exception: %s" % result.exception)
- print(result.exc_info)
+ msg = ["Unexpected result from CLI execution\n"]
+ msg.append("Exit code: %s\n" % result.exit_code)
+ msg.append("Exception: %s\n" % result.exception)
import traceback
- traceback.print_exception(*result.exc_info)
- print("Output:\n%s" % result.output)
- self.fail("Exception during CLI execution")
+ msg.extend(traceback.format_exception(*result.exc_info))
+ msg.append("Output:\n%s" % result.output)
+ self.fail("".join(msg))
def _verify_load_facts(self, load_facts_mock):
""" Check that we ran load facts with expected inputs. """
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 2b4fce512..5651e6e7a 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -266,42 +266,3 @@ class HostTests(OOInstallFixture):
self.assertIn(node_labels_expected, legacy_inventory_line)
# An unquoted version is not present
self.assertNotIn(node_labels_bad, legacy_inventory_line)
-
-
- # def test_new_write_inventory_same_as_legacy(self):
- # """Verify the original write_host function produces the same output as the new method"""
- # yaml_props = {
- # 'ip': '192.168.0.1',
- # 'hostname': 'a.example.com',
- # 'connect_to': 'a-private.example.com',
- # 'public_ip': '192.168.0.1',
- # 'public_hostname': 'a.example.com',
- # 'new_host': True,
- # 'roles': ['node'],
- # 'other_variables': {
- # 'zzz': 'last',
- # 'foo': 'bar',
- # 'aaa': 'first',
- # },
- # }
-
- # new_node = Host(**yaml_props)
- # inventory = cStringIO()
-
- # # This is what the original 'write_host' function will
- # # generate. write_host has no return value, it just writes
- # # directly to the file 'inventory' which in this test-case is
- # # a StringIO object
- # ooinstall.openshift_ansible.write_host(
- # new_node,
- # 'node',
- # inventory,
- # schedulable=True)
- # legacy_inventory_line = inventory.getvalue()
-
- # # This is what the new method in the Host class generates
- # new_inventory_line = new_node.inventory_string('node', schedulable=True)
-
- # self.assertEqual(
- # legacy_inventory_line,
- # new_inventory_line)
diff --git a/utils/test/openshift_ansible_tests.py b/utils/test/openshift_ansible_tests.py
index 5847fe37b..02a9754db 100644
--- a/utils/test/openshift_ansible_tests.py
+++ b/utils/test/openshift_ansible_tests.py
@@ -2,7 +2,6 @@ import os
import unittest
import tempfile
import shutil
-import yaml
from six.moves import configparser
@@ -40,17 +39,10 @@ class TestOpenShiftAnsible(unittest.TestCase):
def tearDown(self):
shutil.rmtree(self.work_dir)
- def generate_hosts(self, num_hosts, name_prefix, roles=None, new_host=False):
- hosts = []
- for num in range(1, num_hosts + 1):
- hosts.append(Host(connect_to=name_prefix + str(num),
- roles=roles, new_host=new_host))
- return hosts
-
def test_generate_inventory_new_nodes(self):
- hosts = self.generate_hosts(1, 'master', roles=(['master', 'etcd']))
- hosts.extend(self.generate_hosts(1, 'node', roles=['node']))
- hosts.extend(self.generate_hosts(1, 'new_node', roles=['node'], new_host=True))
+ hosts = generate_hosts(1, 'master', roles=(['master', 'etcd']))
+ hosts.extend(generate_hosts(1, 'node', roles=['node']))
+ hosts.extend(generate_hosts(1, 'new_node', roles=['node'], new_host=True))
openshift_ansible.generate_inventory(hosts)
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(self.inventory)
@@ -59,8 +51,8 @@ class TestOpenShiftAnsible(unittest.TestCase):
def test_write_inventory_vars_role_vars(self):
with open(self.inventory, 'w') as inv:
- openshift_ansible.CFG.deployment.roles['master'].variables={'color': 'blue'}
- openshift_ansible.CFG.deployment.roles['node'].variables={'color': 'green'}
+ openshift_ansible.CFG.deployment.roles['master'].variables = {'color': 'blue'}
+ openshift_ansible.CFG.deployment.roles['node'].variables = {'color': 'green'}
openshift_ansible.write_inventory_vars(inv, None)
inventory = configparser.ConfigParser(allow_no_value=True)
@@ -69,3 +61,11 @@ class TestOpenShiftAnsible(unittest.TestCase):
self.assertEquals('blue', inventory.get('masters:vars', 'color'))
self.assertTrue(inventory.has_section('nodes:vars'))
self.assertEquals('green', inventory.get('nodes:vars', 'color'))
+
+
+def generate_hosts(num_hosts, name_prefix, roles=None, new_host=False):
+ hosts = []
+ for num in range(1, num_hosts + 1):
+ hosts.append(Host(connect_to=name_prefix + str(num),
+ roles=roles, new_host=new_host))
+ return hosts
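With `generate_hosts` at module level, it is a plain factory that any test in the file can call directly. A self-contained sketch of how it composes a scale-up host list; `FakeHost` is hypothetical, standing in for ooinstall's `Host` class, and only mirrors the keyword arguments used above:

```python
class FakeHost(object):
    """Stand-in for ooinstall's Host; keeps only the fields used here."""
    def __init__(self, connect_to=None, roles=None, new_host=False):
        self.connect_to = connect_to
        self.roles = roles or []
        self.new_host = new_host


def generate_hosts(num_hosts, name_prefix, roles=None, new_host=False):
    # Same shape as the helper above, built against the stand-in class;
    # connect_to values are name_prefix plus a 1-based index.
    return [FakeHost(connect_to=name_prefix + str(num),
                     roles=roles, new_host=new_host)
            for num in range(1, num_hosts + 1)]


hosts = generate_hosts(1, 'master', roles=['master', 'etcd'])
hosts.extend(generate_hosts(2, 'node', roles=['node']))
hosts.extend(generate_hosts(1, 'new_node', roles=['node'], new_host=True))

assert [h.connect_to for h in hosts] == ['master1', 'node1', 'node2', 'new_node1']
assert [h.new_host for h in hosts] == [False, False, False, True]
```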
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
index cbce64f7e..cabeaee34 100644
--- a/utils/test/test_utils.py
+++ b/utils/test/test_utils.py
@@ -2,14 +2,14 @@
Unittests for ooinstall utils.
"""
-import six
import unittest
-import logging
-import sys
import copy
-from ooinstall.utils import debug_env, is_valid_hostname
import mock
+import six
+
+from ooinstall.utils import debug_env, is_valid_hostname
+
class TestUtils(unittest.TestCase):
"""
diff --git a/utils/tox.ini b/utils/tox.ini
deleted file mode 100644
index 2524923cb..000000000
--- a/utils/tox.ini
+++ /dev/null
@@ -1,19 +0,0 @@
-[tox]
-minversion=2.3.1
-envlist =
- py{27,35}-{flake8,unit,pylint}
-skipsdist=True
-skip_missing_interpreters=True
-
-[testenv]
-usedevelop=True
-deps =
- -rtest-requirements.txt
- py35-flake8: flake8-bugbear
-commands =
- # Needed to make detox work, since it ignores usedevelop
- # https://github.com/tox-dev/tox/issues/180
- unit: pip install -e .
- unit: pytest {posargs}
- flake8: python setup.py flake8
- pylint: python setup.py lint