-rw-r--r--  .papr.inventory | 3
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  DEPLOYMENT_TYPES.md | 2
-rw-r--r--  README.md | 2
-rw-r--r--  docs/proposals/README.md | 27
-rw-r--r--  docs/proposals/proposal_template.md | 30
-rw-r--r--  filter_plugins/oo_filters.py | 15
-rw-r--r--  filter_plugins/openshift_node.py | 13
-rw-r--r--  images/installer/README_INVENTORY_GENERATOR.md | 85
-rw-r--r--  images/installer/root/etc/inventory-generator-config.yaml | 20
-rw-r--r--  images/installer/root/exports/config.json.template | 2
-rwxr-xr-x  images/installer/root/usr/local/bin/generate | 397
-rwxr-xr-x  images/installer/root/usr/local/bin/run | 7
-rw-r--r--  inventory/byo/hosts.origin.example | 29
-rw-r--r--  inventory/byo/hosts.ose.example | 28
-rw-r--r--  openshift-ansible.spec | 43
-rw-r--r--  playbooks/adhoc/uninstall.yml | 2
-rw-r--r--  playbooks/aws/README.md | 220
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 19
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 146
-rw-r--r--  playbooks/aws/openshift-cluster/build_node_group.yml | 47
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 74
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 158
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 16
-rw-r--r--  playbooks/aws/openshift-cluster/provision_nodes.yml | 49
-rw-r--r--  playbooks/aws/openshift-cluster/provisioning_vars.example.yml | 28
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 113
l---------  playbooks/byo/openshift-checks/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-logging.yml | 3
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-prometheus.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/service-catalog.yml | 3
-rw-r--r--  playbooks/byo/openshift-loadbalancer/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/additional_config.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/scaleup.yml | 7
-rw-r--r--  playbooks/byo/openshift-nfs/config.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/sanity_checks.yml | 51
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 18
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 6
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 19
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 6
-rw-r--r--  playbooks/common/openshift-master/config.yml | 44
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 2
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 2
-rw-r--r--  playbooks/common/openshift-node/config.yml | 9
-rw-r--r--  playbooks/gcp/openshift-cluster/provision.yml | 19
-rw-r--r--  roles/docker/defaults/main.yml | 5
-rw-r--r--  roles/docker/tasks/package_docker.yml | 14
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 21
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 20
-rw-r--r--  roles/docker/templates/crio.conf.j2 | 5
-rw-r--r--  roles/etcd_common/defaults/main.yml | 2
-rw-r--r--  roles/etcd_common/tasks/backup.yml | 5
-rw-r--r--  roles/etcd_common/tasks/main.yml | 2
-rw-r--r--  roles/etcd_common/tasks/noop.yml | 4
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 5
-rw-r--r--  roles/flannel_register/templates/flannel-config.json | 1
-rw-r--r--  roles/lib_utils/library/repoquery.py | 18
-rw-r--r--  roles/lib_utils/library/yedit.py | 1
-rw-r--r--  roles/lib_utils/src/ansible/repoquery.py | 17
-rw-r--r--  roles/lib_utils/src/lib/import.py | 1
-rw-r--r--  roles/openshift_aws/README.md | 84
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 209
-rw-r--r--  roles/openshift_aws/filter_plugins/filters.py | 28
-rw-r--r--  roles/openshift_aws/meta/main.yml | 3
-rw-r--r--  roles/openshift_aws/tasks/ami_copy.yml | 34
-rw-r--r--  roles/openshift_aws/tasks/build_ami.yml | 48
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 34
-rw-r--r--  roles/openshift_aws/tasks/elb.yml | 68
-rw-r--r--  roles/openshift_aws/tasks/iam_cert.yml | 29
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 45
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 54
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 66
-rw-r--r--  roles/openshift_aws/tasks/s3.yml | 7
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 32
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 49
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml | 45
-rw-r--r--  roles/openshift_aws/tasks/ssh_keys.yml (renamed from roles/openshift_aws_ssh_keys/tasks/main.yml) | 4
-rw-r--r--  roles/openshift_aws/tasks/vpc.yml (renamed from roles/openshift_aws_vpc/tasks/main.yml) | 21
-rw-r--r--  roles/openshift_aws_ami_copy/README.md | 50
-rw-r--r--  roles/openshift_aws_ami_copy/tasks/main.yml | 26
-rw-r--r--  roles/openshift_aws_elb/README.md | 75
-rw-r--r--  roles/openshift_aws_elb/defaults/main.yml | 33
-rw-r--r--  roles/openshift_aws_elb/meta/main.yml | 12
-rw-r--r--  roles/openshift_aws_elb/tasks/main.yml | 57
-rw-r--r--  roles/openshift_aws_iam_kms/README.md | 43
-rw-r--r--  roles/openshift_aws_iam_kms/defaults/main.yml | 1
-rw-r--r--  roles/openshift_aws_iam_kms/meta/main.yml | 13
-rw-r--r--  roles/openshift_aws_iam_kms/tasks/main.yml | 18
-rw-r--r--  roles/openshift_aws_launch_config/README.md | 72
-rw-r--r--  roles/openshift_aws_launch_config/defaults/main.yml | 1
-rw-r--r--  roles/openshift_aws_launch_config/meta/main.yml | 12
-rw-r--r--  roles/openshift_aws_launch_config/tasks/main.yml | 50
-rw-r--r--  roles/openshift_aws_launch_config/templates/cloud-init.j2 | 9
-rw-r--r--  roles/openshift_aws_node_group/README.md | 77
-rw-r--r--  roles/openshift_aws_node_group/defaults/main.yml | 58
-rw-r--r--  roles/openshift_aws_node_group/tasks/main.yml | 32
-rw-r--r--  roles/openshift_aws_s3/README.md | 43
-rw-r--r--  roles/openshift_aws_s3/tasks/main.yml | 6
-rw-r--r--  roles/openshift_aws_sg/README.md | 59
-rw-r--r--  roles/openshift_aws_sg/defaults/main.yml | 48
-rw-r--r--  roles/openshift_aws_sg/tasks/main.yml | 53
-rw-r--r--  roles/openshift_aws_ssh_keys/README.md | 49
-rw-r--r--  roles/openshift_aws_vpc/README.md | 62
-rw-r--r--  roles/openshift_aws_vpc/defaults/main.yml | 1
-rw-r--r--  roles/openshift_cfme/defaults/main.yml | 3
-rw-r--r--  roles/openshift_cfme/meta/main.yml | 1
-rw-r--r--  roles/openshift_cfme/tasks/nfs.yml | 7
-rw-r--r--  roles/openshift_cli/meta/main.yml | 1
-rw-r--r--  roles/openshift_common/README.md | 45
-rw-r--r--  roles/openshift_common/defaults/main.yml | 3
-rw-r--r--  roles/openshift_common/meta/main.yml | 15
-rw-r--r--  roles/openshift_common/tasks/main.yml | 78
-rw-r--r--  roles/openshift_examples/meta/main.yml | 3
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 126
-rw-r--r--  roles/openshift_gcp/tasks/main.yaml | 43
-rw-r--r--  roles/openshift_gcp/templates/dns.j2.sh | 13
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 318
-rw-r--r--  roles/openshift_gcp/templates/remove.j2.sh | 156
-rw-r--r--  roles/openshift_gcp_image_prep/files/partition.conf | 3
-rw-r--r--  roles/openshift_gcp_image_prep/tasks/main.yaml | 18
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 160
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 16
-rw-r--r--  roles/openshift_health_checker/library/aos_version.py | 31
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 134
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 9
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 87
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 3
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_update.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 3
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 134
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 13
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 187
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py | 18
-rw-r--r--  roles/openshift_health_checker/test/logging_index_time_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/openshift_check_test.py | 43
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/package_update_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 10
-rw-r--r--  roles/openshift_health_checker/test/zz_failure_summary_test.py | 15
-rw-r--r--  roles/openshift_hosted/README.md | 1
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 10
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 10
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 20
-rw-r--r--  roles/openshift_hosted_logging/meta/main.yaml | 1
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/meta/main.yml | 3
-rw-r--r--  roles/openshift_loadbalancer/defaults/main.yml | 8
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 2
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 2
-rw-r--r--  roles/openshift_logging_curator/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 15
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 2
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 4
-rw-r--r--  roles/openshift_logging_kibana/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 2
-rw-r--r--  roles/openshift_logging_mux/vars/main.yml | 4
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 3
-rw-r--r--  roles/openshift_master/README.md | 9
-rw-r--r--  roles/openshift_master/defaults/main.yml | 21
-rw-r--r--  roles/openshift_master/meta/main.yml | 16
-rw-r--r--  roles/openshift_master/tasks/main.yml | 46
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 27
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 9
-rw-r--r--  roles/openshift_master/tasks/update_etcd_client_urls.yml | 8
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 14
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 14
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 | 2
-rw-r--r--  roles/openshift_master/vars/main.yml | 19
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 5
-rw-r--r--  roles/openshift_metrics/tasks/pre_install.yaml | 2
-rw-r--r--  roles/openshift_node/README.md | 10
-rw-r--r--  roles/openshift_node/defaults/main.yml | 32
-rw-r--r--  roles/openshift_node/handlers/main.yml | 5
-rw-r--r--  roles/openshift_node/meta/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 33
-rw-r--r--  roles/openshift_node/tasks/config.yml | 18
-rw-r--r--  roles/openshift_node/tasks/install.yml | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 39
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 25
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 4
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 12
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 19
-rw-r--r--  roles/openshift_node_dnsmasq/meta/main.yml | 1
-rw-r--r--  roles/openshift_node_upgrade/README.md | 4
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 5
-rw-r--r--  roles/openshift_node_upgrade/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/meta/main.yml | 1
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 4
-rw-r--r--  roles/openshift_node_upgrade/tasks/systemd_units.yml | 6
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 4
-rw-r--r--  roles/openshift_persistent_volumes/README.md | 7
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml | 1
-rw-r--r--  roles/openshift_prometheus/README.md | 95
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 74
-rw-r--r--  roles/openshift_prometheus/files/openshift_prometheus.exports | 3
-rw-r--r--  roles/openshift_prometheus/meta/main.yaml | 19
-rw-r--r--  roles/openshift_prometheus/tasks/create_pvs.yaml | 36
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 244
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml | 26
-rw-r--r--  roles/openshift_prometheus/tasks/nfs.yaml | 44
-rw-r--r--  roles/openshift_prometheus/templates/alertmanager.yml.j2 | 20
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-server.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.rules.j2 | 4
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.yml.j2 | 174
-rw-r--r--  roles/openshift_prometheus/templates/prometheus_deployment.j2 | 240
-rw-r--r--  roles/openshift_prometheus/tests/inventory | 2
-rw-r--r--  roles/openshift_prometheus/tests/test.yaml | 5
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 21
-rw-r--r--  roles/openshift_service_catalog/defaults/main.yml | 4
-rw-r--r--  roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js | 1
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 5
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml | 143
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml | 134
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 | 36
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 | 49
-rw-r--r--  roles/openshift_version/defaults/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 10
-rw-r--r--  roles/os_firewall/tasks/iptables.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 6
-rw-r--r--  setup.py | 101
-rw-r--r--  test/integration/openshift_health_checker/common.go | 2
271 files changed, 5424 insertions, 2445 deletions
diff --git a/.papr.inventory b/.papr.inventory
index 878d434e2..aa4324c21 100644
--- a/.papr.inventory
+++ b/.papr.inventory
@@ -11,6 +11,9 @@ openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
openshift_check_min_host_disk_gb=1.5
openshift_check_min_host_memory_gb=1.9
+osm_cluster_network_cidr=10.128.0.0/14
+openshift_portal_net=172.30.0.0/16
+osm_host_subnet_length=9
[masters]
ocp-master
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index a7076c210..9a5acc500 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.125.0 ./
+3.7.0-0.126.0 ./
diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md
index 009a1d95c..e52e47202 100644
--- a/DEPLOYMENT_TYPES.md
+++ b/DEPLOYMENT_TYPES.md
@@ -12,6 +12,6 @@ The table below outlines the defaults per `openshift_deployment_type`:
|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|
| **openshift.common.service_type** (also used for package names) | origin | atomic-openshift |
| **openshift.common.config_base** | /etc/origin | /etc/origin |
-| **openshift.common.data_dir** | /var/lib/origin | /var/lib/origin |
+| **openshift_data_dir** | /var/lib/origin | /var/lib/origin |
| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
| **Image Streams** | centos | rhel |
diff --git a/README.md b/README.md
index d696a33e7..03dac2282 100644
--- a/README.md
+++ b/README.md
@@ -38,7 +38,7 @@ Follow this release pattern and you can't go wrong:
| Origin/OCP | OpenShift-Ansible version | openshift-ansible branch |
| ------------- | ----------------- |----------------------------------|
| 1.3 / 3.3 | 3.3 | release-1.3 |
-| 1.4 / 3.4 | 3.4 | releaes-1.4 |
+| 1.4 / 3.4 | 3.4 | release-1.4 |
| 1.5 / 3.5 | 3.5 | release-1.5 |
| 3.*X* | 3.*X* | release-3.x |
diff --git a/docs/proposals/README.md b/docs/proposals/README.md
new file mode 100644
index 000000000..89bbe5163
--- /dev/null
+++ b/docs/proposals/README.md
@@ -0,0 +1,27 @@
+# OpenShift-Ansible Proposal Process
+
+## Proposal Decision Tree
+TODO: Add details about when a proposal is or is not required.
+
+## Proposal Process
+The following process should be followed when a proposal is needed:
+
+1. Create a pull request with the initial proposal
+ * Use the [proposal template][template]
+ * Name the proposal using two or three topic words separated by underscores (e.g. proposal_template.md)
+ * Place the proposal in the docs/proposals directory
+2. Notify the development team of the proposal and request feedback
+3. Review the proposal at the OpenShift-Ansible Architecture Meeting
+4. Update the proposal as needed and ask for feedback
+5. Approved/Closed Phase
+ * If 75% or more of the active development team give the proposal a :+1:, it is Approved
+ * If 50% or more of the active development team disagree with the proposal, it is Closed
+ * If the person proposing the proposal no longer wishes to continue, they can request that it be Closed
+ * If there is no activity on a proposal, the active development team may Close the proposal at their discretion
+ * If none of the above is met, the cycle returns to Step 4.
+6. For approved proposals, the current development lead(s) will:
+ * Update the Pull Request with the result and merge the proposal
+ * Create a card on the Cluster Lifecycle [Trello board][trello] so it may be scheduled for implementation.
+
+[template]: proposal_template.md
+[trello]: https://trello.com/b/wJYDst6C
diff --git a/docs/proposals/proposal_template.md b/docs/proposals/proposal_template.md
new file mode 100644
index 000000000..ece288037
--- /dev/null
+++ b/docs/proposals/proposal_template.md
@@ -0,0 +1,30 @@
+# Proposal Title
+
+## Description
+<Short introduction>
+
+## Rationale
+<Summary of main points of Design>
+
+## Design
+<Main content goes here>
+
+## Checklist
+* Item 1
+* Item 2
+* Item 3
+
+## User Story
+As a developer on OpenShift-Ansible,
+I want ...
+so that ...
+
+## Acceptance Criteria
+* Verify that ...
+* Verify that ...
+* Verify that ...
+
+## References
+* Link
+* Link
+* Link
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 36a90a870..277695f78 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1024,6 +1024,18 @@ def oo_contains_rule(source, apiGroups, resources, verbs):
return False
+def oo_selector_to_string_list(user_dict):
+ """Convert a dict of selectors to a key=value list of strings
+
+Given input of {'region': 'infra', 'zone': 'primary'} returns a list
+of items as ['region=infra', 'zone=primary']
+ """
+ selectors = []
+ for key in user_dict:
+ selectors.append("{}={}".format(key, user_dict[key]))
+ return selectors
+
+
class FilterModule(object):
""" Custom ansible filter mapping """
@@ -1065,5 +1077,6 @@ class FilterModule(object):
"oo_openshift_loadbalancer_backends": oo_openshift_loadbalancer_backends,
"to_padded_yaml": to_padded_yaml,
"oo_random_word": oo_random_word,
- "oo_contains_rule": oo_contains_rule
+ "oo_contains_rule": oo_contains_rule,
+ "oo_selector_to_string_list": oo_selector_to_string_list
}
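
As a rough illustration of how the new filter behaves, here is a minimal task; chaining with `join` into a comma-separated selector string is an assumed, typical usage and is not something this diff itself adds:

```yaml
# Minimal sketch: exercise the new oo_selector_to_string_list filter from a play.
- name: Render a selector dict as key=value strings
  debug:
    msg: "{{ {'region': 'infra', 'zone': 'primary'} | oo_selector_to_string_list | join(',') }}"
  # Prints e.g. "region=infra,zone=primary" (key order follows dict iteration order).
```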
diff --git a/filter_plugins/openshift_node.py b/filter_plugins/openshift_node.py
index cad95ea6d..50c360e97 100644
--- a/filter_plugins/openshift_node.py
+++ b/filter_plugins/openshift_node.py
@@ -25,18 +25,7 @@ class FilterModule(object):
# We always use what they've specified if they've specified a value
if openshift_dns_ip is not None:
return openshift_dns_ip
-
- if bool(hostvars['openshift']['common']['use_dnsmasq']):
- return hostvars['ansible_default_ipv4']['address']
- elif bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):
- if 'openshift_master_cluster_vip' in hostvars:
- return hostvars['openshift_master_cluster_vip']
- else:
- if 'openshift_master_cluster_vip' in hostvars:
- return hostvars['openshift_master_cluster_vip']
- elif 'openshift_node_first_master_ip' in hostvars:
- return hostvars['openshift_node_first_master_ip']
- return None
+ return hostvars['ansible_default_ipv4']['address']
def filters(self):
''' returns a mapping of filters to methods '''
diff --git a/images/installer/README_INVENTORY_GENERATOR.md b/images/installer/README_INVENTORY_GENERATOR.md
new file mode 100644
index 000000000..9c10e4b71
--- /dev/null
+++ b/images/installer/README_INVENTORY_GENERATOR.md
@@ -0,0 +1,85 @@
+Dynamic Inventory Generation
+============================
+
+A script within the openshift-ansible image that can dynamically
+generate an Ansible inventory file from an existing cluster.
+
+## Configure
+
+User configuration helps to provide additional details when creating an inventory file.
+The default location of this file is `/etc/inventory-generator-config.yaml`. The
+following configuration values are either expected or default to the given values when omitted:
+
+- `master_config_path`:
+ - specifies where to look for the bind-mounted `master-config.yaml` file in the container
+ - if omitted or a `null` value is given, its value is defaulted to `/opt/app-root/src/master-config.yaml`
+
+- `admin_kubeconfig_path`:
+ - specifies where to look for the bind-mounted `admin.kubeconfig` file in the container
+ - if omitted or a `null` value is given, its value is defaulted to `/opt/app-root/src/.kube/config`
+
+- `ansible_ssh_user`:
+ - specifies the ssh user to be used by Ansible when running the specified `PLAYBOOK_FILE` (see `README_CONTAINER_IMAGE.md` for additional information on this environment variable).
+ - if omitted, its value is defaulted to `root`
+
+- `ansible_become_user`:
+ - specifies a user to "become" on the remote host. Used for privilege escalation.
+ - If a non-null value is specified, `ansible_become` is implicitly set to `yes` in the resulting inventory file.
+
+See the supplied sample user configuration file in [`root/etc/inventory-generator-config.yaml`](./root/etc/inventory-generator-config.yaml) for additional optional inventory variables that may be specified.
+
+## Build
+
+See `README_CONTAINER_IMAGE.md` for information on building this image.
+
+## Run
+
+Given a master node's `master-config.yaml` file, a user configuration file (see "Configure" section), and an `admin.kubeconfig` file, the command below will:
+
+1. Use `oc` to query the host about additional node information (using the supplied `kubeconfig` file)
+2. Generate an inventory file based on information retrieved from `oc get nodes` and the given `master-config.yaml` file.
+3. Run the specified [openshift-ansible](https://github.com/openshift/openshift-ansible) `health.yml` playbook using the generated inventory file from the previous step.
+
+```
+docker run -u `id -u` \
+ -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \
+ -v /tmp/origin/master/admin.kubeconfig:/opt/app-root/src/.kube/config:Z \
+ -v /tmp/origin/master/master-config.yaml:/opt/app-root/src/master-config.yaml:Z \
+ -e OPTS="-v --become-user root" \
+ -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/health.yml \
+ -e GENERATE_INVENTORY=true \
+ -e USER=`whoami` \
+ openshift/origin-ansible
+
+```
+
+**Note:** In the command above, specifying the `GENERATE_INVENTORY` environment variable will automatically generate the inventory file in an expected location.
+An `INVENTORY_FILE` variable (or any other inventory location) does not need to be supplied when generating an inventory.
+
+## Debug
+
+To debug the `generate` script, run the above container interactively
+and manually execute `/usr/local/bin/generate`:
+
+```
+...
+docker run -u `id -u` \
+ -v ...
+ ...
+ -it openshift/origin-ansible /bin/bash
+
+---
+
+bash-4.2$ cd $HOME
+bash-4.2$ ls
+master-config.yaml
+bash-4.2$ /usr/local/bin/generate $HOME/generated_hosts
+bash-4.2$ ls
+generated_hosts master-config.yaml
+bash-4.2$ less generated_hosts
+...
+```
+
+## Notes
+
+See `README_CONTAINER_IMAGE.md` for additional information about this image.
diff --git a/images/installer/root/etc/inventory-generator-config.yaml b/images/installer/root/etc/inventory-generator-config.yaml
new file mode 100644
index 000000000..d56e3f4d2
--- /dev/null
+++ b/images/installer/root/etc/inventory-generator-config.yaml
@@ -0,0 +1,20 @@
+---
+# meta config
+master_config_path: "/opt/app-root/src/master-config.yaml"
+admin_kubeconfig_path: "/opt/app-root/src/.kube/config"
+
+# default user configuration
+ansible_ssh_user: ec2-user
+ansible_become: "yes"
+ansible_become_user: "root"
+
+# openshift-ansible inventory vars
+openshift_uninstall_images: false
+openshift_install_examples: true
+openshift_deployment_type: origin
+
+openshift_release: 3.6
+openshift_image_tag: v3.6.0
+openshift_hosted_logging_deploy: null # defaults to "true" if loggingPublicURL is set in master-config.yaml
+openshift_logging_image_version: v3.6.0
+openshift_disable_check: ""
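
A minimal, illustrative excerpt of the master-config.yaml field the generate script inspects when resolving the `openshift_hosted_logging_deploy: null` default above; the hostname is a placeholder, not part of this change:

```yaml
# Illustrative fragment only: when assetConfig.loggingPublicURL is non-empty,
# the inventory generator defaults openshift_hosted_logging_deploy to True.
assetConfig:
  loggingPublicURL: "https://kibana.apps.example.com"
```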
diff --git a/images/installer/root/exports/config.json.template b/images/installer/root/exports/config.json.template
index 739c0080f..1a009fa7b 100644
--- a/images/installer/root/exports/config.json.template
+++ b/images/installer/root/exports/config.json.template
@@ -24,7 +24,7 @@
"PLAYBOOK_FILE=$PLAYBOOK_FILE",
"ANSIBLE_CONFIG=$ANSIBLE_CONFIG"
],
- "cwd": "/opt/app-root/src/",
+ "cwd": "/usr/share/ansible/openshift-ansible",
"rlimits": [
{
"type": "RLIMIT_NOFILE",
diff --git a/images/installer/root/usr/local/bin/generate b/images/installer/root/usr/local/bin/generate
new file mode 100755
index 000000000..3db7a3ee8
--- /dev/null
+++ b/images/installer/root/usr/local/bin/generate
@@ -0,0 +1,397 @@
+#!/bin/env python
+
+"""
+Attempts to read 'master-config.yaml' and extract remote
+host information to dynamically create an inventory file
+in order to run Ansible playbooks against that host.
+"""
+
+import os
+import re
+import shlex
+import shutil
+import subprocess
+import sys
+import yaml
+
+try:
+ HOME = os.environ['HOME']
+except KeyError:
+ print 'A required environment variable "$HOME" has not been set'
+ exit(1)
+
+DEFAULT_USER_CONFIG_PATH = '/etc/inventory-generator-config.yaml'
+DEFAULT_MASTER_CONFIG_PATH = HOME + '/master-config.yaml'
+DEFAULT_ADMIN_KUBECONFIG_PATH = HOME + '/.kube/config'
+
+INVENTORY_FULL_PATH = HOME + '/generated_hosts'
+USE_STDOUT = True
+
+if len(sys.argv) > 1:
+ INVENTORY_FULL_PATH = sys.argv[1]
+ USE_STDOUT = False
+
+
+class OpenShiftClientError(Exception):
+ """Base exception class for OpenShift CLI wrapper"""
+ pass
+
+
+class InvalidHost(Exception):
+ """Base exception class for host creation problems."""
+ pass
+
+
+class InvalidHostGroup(Exception):
+ """Base exception class for host-group creation problems."""
+ pass
+
+
+class OpenShiftClient:
+ oc = None
+ kubeconfig = None
+
+ def __init__(self, kubeconfig=DEFAULT_ADMIN_KUBECONFIG_PATH):
+ """Find and store path to oc binary"""
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+ oc_binary_name = 'oc'
+ oc_binary = None
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary_name, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary_name)):
+ oc_binary = os.path.join(path, oc_binary_name)
+ break
+
+ if oc_binary is None:
+ raise OpenShiftClientError('Unable to locate `oc` binary. Not present in PATH.')
+
+ self.oc = oc_binary
+ self.kubeconfig = kubeconfig
+
+ def call(self, cmd_str):
+ """Execute a remote call using `oc`"""
+ cmd = [
+ self.oc,
+ '--config',
+ self.kubeconfig
+ ] + shlex.split(cmd_str)
+ try:
+ out = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as err:
+ raise OpenShiftClientError('[rc {}] {}\n{}'.format(err.returncode, ' '.join(err.cmd), err.output))
+ return out
+
+ def whoami(self):
+ """Retrieve information about the current user in the given kubeconfig"""
+ return self.call('whoami')
+
+ def get_nodes(self):
+ """Retrieve remote node information as a yaml object"""
+ return self.call('get nodes -o yaml')
+
+
+class HostGroup:
+ groupname = ""
+ hosts = list()
+
+ def __init__(self, hosts):
+ if not hosts:
+ return
+ first = hosts[0].get_group_name()
+ for h in hosts:
+ if h.get_group_name() != first:
+ raise InvalidHostGroup("Attempt to create HostGroup with hosts of varying groups.")
+
+ self.hosts = hosts
+ self.groupname = first
+
+ def add_host(self, host):
+ """Add a new host to this group."""
+ self.hosts.append(host)
+
+ def get_group_name(self):
+ """Return the groupname associated with each aggregated host."""
+ return self.groupname
+
+ def get_hosts(self):
+ """Return aggregated hosts"""
+ return self.hosts
+
+ def string(self):
+ """Call the print method for each aggregated host; separated by newlines."""
+ infos = ""
+ for host in self.hosts:
+ infos += host.string() + "\n"
+ return infos
+
+
+class Host:
+ group = "masters"
+ alias = ""
+ hostname = ""
+ public_hostname = ""
+ ip_addr = ""
+ public_ip_addr = ""
+
+ def __init__(self, groupname):
+ if not groupname:
+ raise InvalidHost("Attempt to create Host with no group name provided.")
+ self.group = groupname
+
+ def get_group_name(self):
+ return self.group
+
+ def get_openshift_hostname(self):
+ return self.hostname
+
+ def host_alias(self, hostalias):
+ """Set an alias for this host."""
+ self.alias = hostalias
+
+ def address(self, ip):
+ """Set the ip address for this host."""
+ self.ip_addr = ip
+
+ def public_address(self, ip):
+ """Set the external ip address for this host."""
+ self.public_ip_addr = ip
+
+ def host_name(self, hname):
+ self.hostname = parse_hostname(hname)
+
+ def public_host_name(self, phname):
+ self.public_hostname = parse_hostname(phname)
+
+ def string(self):
+ """Print an inventory-file compatible string with host information"""
+ info = ""
+ if self.alias:
+ info += self.alias + " "
+ elif self.hostname:
+ info += self.hostname + " "
+ elif self.ip_addr:
+ info += self.ip_addr + " "
+ if self.ip_addr:
+ info += "openshift_ip=" + self.ip_addr + " "
+ if self.public_ip_addr:
+ info += "openshift_public_ip=" + self.public_ip_addr + " "
+ if self.hostname:
+ info += "openshift_hostname=" + self.hostname + " "
+ if self.public_hostname:
+ info += "openshift_public_hostname=" + self.public_hostname
+
+ return info
+
+
+def parse_hostname(host):
+ """Remove protocol and port from given hostname.
+ Return parsed string"""
+ no_proto = re.split('^http(s)?\:\/\/', host)
+ if no_proto:
+ host = no_proto[-1]
+
+ no_port = re.split('\:[0-9]+(/)?$', host)
+ if no_port:
+ host = no_port[0]
+
+ return host
+
+
+def main():
+ """Parse master-config file and populate inventory file."""
+ # set default values
+ USER_CONFIG = os.environ.get('CONFIG')
+ if not USER_CONFIG:
+ USER_CONFIG = DEFAULT_USER_CONFIG_PATH
+
+ # read user configuration
+ try:
+ config_file_obj = open(USER_CONFIG, 'r')
+ raw_config_file = config_file_obj.read()
+ user_config = yaml.load(raw_config_file)
+ if not user_config:
+ user_config = dict()
+ except IOError as err:
+ print "Unable to find or read user configuration file '{}': {}".format(USER_CONFIG, err)
+ exit(1)
+
+ master_config_path = user_config.get('master_config_path', DEFAULT_MASTER_CONFIG_PATH)
+ if not master_config_path:
+ master_config_path = DEFAULT_MASTER_CONFIG_PATH
+
+ admin_kubeconfig_path = user_config.get('admin_kubeconfig_path', DEFAULT_ADMIN_KUBECONFIG_PATH)
+ if not admin_kubeconfig_path:
+ admin_kubeconfig_path = DEFAULT_ADMIN_KUBECONFIG_PATH
+
+ try:
+ file_obj = open(master_config_path, 'r')
+ except IOError as err:
+ print "Unable to find or read host master configuration file '{}': {}".format(master_config_path, err)
+ exit(1)
+
+ raw_text = file_obj.read()
+
+ y = yaml.load(raw_text)
+ if y.get("kind", "") != "MasterConfig":
+ print "Bind-mounted host master configuration file is not of 'kind' MasterConfig. Aborting..."
+ exit(1)
+
+ # finish reading config file and begin gathering
+ # cluster information for inventory file
+ file_obj.close()
+
+ # set inventory values based on user configuration
+ ansible_ssh_user = user_config.get('ansible_ssh_user', 'root')
+ ansible_become_user = user_config.get('ansible_become_user')
+
+ openshift_uninstall_images = user_config.get('openshift_uninstall_images', False)
+ openshift_install_examples = user_config.get('openshift_install_examples', True)
+ openshift_deployment_type = user_config.get('openshift_deployment_type', 'origin')
+
+ openshift_release = user_config.get('openshift_release')
+ openshift_image_tag = user_config.get('openshift_image_tag')
+ openshift_logging_image_version = user_config.get('openshift_logging_image_version')
+ openshift_disable_check = user_config.get('openshift_disable_check')
+
+ # extract host config info from parsed yaml file
+ asset_config = y.get("assetConfig")
+ master_config = y.get("kubernetesMasterConfig")
+ etcd_config = y.get("etcdClientInfo")
+
+ # if master_config is missing, error out; we expect to be running on a master to be able to
+ # gather enough information to generate the rest of the inventory file.
+ if not master_config:
+ msg = "'kubernetesMasterConfig' missing from '{}'; unable to gather all necessary host information..."
+ print msg.format(master_config_path)
+ exit(1)
+
+ master_public_url = y.get("masterPublicURL")
+ if not master_public_url:
+ msg = "'kubernetesMasterConfig.masterPublicURL' missing from '{}'; Unable to connect to master host..."
+ print msg.format(master_config_path)
+ exit(1)
+
+ oc = OpenShiftClient(admin_kubeconfig_path)
+
+ # ensure kubeconfig is logged in with provided user, or fail with a friendly message otherwise
+ try:
+ oc.whoami()
+ except OpenShiftClientError as err:
+ msg = ("Unable to obtain user information using the provided kubeconfig file. "
+ "Current context does not appear to be able to authenticate to the server. "
+ "Error returned from server:\n\n{}")
+ print msg.format(str(err))
+ exit(1)
+
+ # connect to remote host using the provided config and extract all possible node information
+ nodes_config = yaml.load(oc.get_nodes())
+
+ # contains host types (e.g. masters, nodes, etcd)
+ host_groups = dict()
+ openshift_hosted_logging_deploy = False
+ is_etcd_deployed = master_config.get("storage-backend", "") in ["etcd3", "etcd2", "etcd"]
+
+ if asset_config and asset_config.get('loggingPublicURL'):
+ openshift_hosted_logging_deploy = True
+
+ openshift_hosted_logging_deploy = user_config.get("openshift_hosted_logging_deploy", openshift_hosted_logging_deploy)
+
+ m = Host("masters")
+ m.address(master_config["masterIP"])
+ m.public_host_name(master_public_url)
+ host_groups["masters"] = HostGroup([m])
+
+ if nodes_config:
+ node_hosts = list()
+ for node in nodes_config.get("items", []):
+ if node["kind"] != "Node":
+ continue
+
+ n = Host("nodes")
+
+ address = ""
+ internal_hostname = ""
+ for item in node["status"].get("addresses", []):
+ if not address and item['type'] in ['InternalIP', 'LegacyHostIP']:
+ address = item['address']
+
+ if item['type'] == 'Hostname':
+ internal_hostname = item['address']
+
+ n.address(address)
+ n.host_name(internal_hostname)
+ node_hosts.append(n)
+
+ host_groups["nodes"] = HostGroup(node_hosts)
+
+ if etcd_config:
+ etcd_hosts = list()
+ for url in etcd_config.get("urls", []):
+ e = Host("etcd")
+ e.host_name(url)
+ etcd_hosts.append(e)
+
+ host_groups["etcd"] = HostGroup(etcd_hosts)
+
+ # open new inventory file for writing
+ if USE_STDOUT:
+ inv_file_obj = sys.stdout
+ else:
+ try:
+ inv_file_obj = open(INVENTORY_FULL_PATH, 'w+')
+ except IOError as err:
+ print "Unable to create or open generated inventory file: {}".format(err)
+ exit(1)
+
+ inv_file_obj.write("[OSEv3:children]\n")
+ for group in host_groups:
+ inv_file_obj.write("{}\n".format(group))
+ inv_file_obj.write("\n")
+
+ inv_file_obj.write("[OSEv3:vars]\n")
+ if ansible_ssh_user:
+ inv_file_obj.write("ansible_ssh_user={}\n".format(ansible_ssh_user))
+ if ansible_become_user:
+ inv_file_obj.write("ansible_become_user={}\n".format(ansible_become_user))
+ inv_file_obj.write("ansible_become=yes\n")
+
+ if openshift_uninstall_images:
+ inv_file_obj.write("openshift_uninstall_images={}\n".format(str(openshift_uninstall_images)))
+ if openshift_deployment_type:
+ inv_file_obj.write("openshift_deployment_type={}\n".format(openshift_deployment_type))
+ if openshift_install_examples:
+ inv_file_obj.write("openshift_install_examples={}\n".format(str(openshift_install_examples)))
+
+ if openshift_release:
+ inv_file_obj.write("openshift_release={}\n".format(str(openshift_release)))
+ if openshift_image_tag:
+ inv_file_obj.write("openshift_image_tag={}\n".format(str(openshift_image_tag)))
+ if openshift_logging_image_version:
+ inv_file_obj.write("openshift_logging_image_version={}\n".format(str(openshift_logging_image_version)))
+ if openshift_disable_check:
+ inv_file_obj.write("openshift_disable_check={}\n".format(str(openshift_disable_check)))
+ inv_file_obj.write("\n")
+
+ inv_file_obj.write("openshift_hosted_logging_deploy={}\n".format(str(openshift_hosted_logging_deploy)))
+ inv_file_obj.write("\n")
+
+ for group in host_groups:
+ inv_file_obj.write("[{}]\n".format(host_groups[group].get_group_name()))
+ inv_file_obj.write(host_groups[group].string())
+ inv_file_obj.write("\n")
+
+ inv_file_obj.close()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
index 9401ea118..70aa0bac3 100755
--- a/images/installer/root/usr/local/bin/run
+++ b/images/installer/root/usr/local/bin/run
@@ -24,9 +24,12 @@ elif [[ -v INVENTORY_URL ]]; then
elif [[ -v DYNAMIC_SCRIPT_URL ]]; then
curl -o ${INVENTORY} ${DYNAMIC_SCRIPT_URL}
chmod 755 ${INVENTORY}
+elif [[ -v GENERATE_INVENTORY ]]; then
+ # dynamically generate inventory file using bind-mounted info
+ /usr/local/bin/generate ${INVENTORY}
else
echo
- echo "One of INVENTORY_FILE, INVENTORY_URL or DYNAMIC_SCRIPT_URL must be provided."
+ echo "One of INVENTORY_FILE, INVENTORY_URL, GENERATE_INVENTORY, or DYNAMIC_SCRIPT_URL must be provided."
exec /usr/local/bin/usage
fi
INVENTORY_ARG="-i ${INVENTORY}"
@@ -36,7 +39,7 @@ if [[ "$ALLOW_ANSIBLE_CONNECTION_LOCAL" = false ]]; then
fi
if [[ -v VAULT_PASS ]]; then
- VAULT_PASS_FILE=.vaultpass
+ VAULT_PASS_FILE="$(mktemp)"
echo ${VAULT_PASS} > ${VAULT_PASS_FILE}
VAULT_PASS_ARG="--vault-password-file ${VAULT_PASS_FILE}"
fi
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index ad69bd587..486fe56a0 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -34,17 +34,17 @@ openshift_deployment_type=origin
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.6
+openshift_release=v3.7
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.6.0
+#openshift_image_tag=v3.7.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.6.0
+#openshift_pkg_version=-3.7.0
# This enables all the system containers except for docker:
#openshift_use_system_containers=False
@@ -118,7 +118,7 @@ openshift_release=v3.6
# Force the registry to use for the docker/crio system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
+#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
#openshift_crio_systemcontainer_image_registry_override="registry.example.com"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
@@ -538,7 +538,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
# Configure the prefix and version for the component images
#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_metrics_deployer_version=v3.6.0
+#openshift_hosted_metrics_deployer_version=v3.7.0
#
# StorageClass
# openshift_storageclass_name=gp2
@@ -593,7 +593,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_logging_deployer_version=v3.6.0
+#openshift_hosted_logging_deployer_version=v3.7.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -613,6 +613,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
+# When upgrading or scaling up, the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_cluster_network_cidr: clusterNetworkCIDR
+# openshift_portal_net: serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -634,6 +640,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure number of bits to allocate to each host’s subnet e.g. 9
# would mean a /23 network on the host.
+# When upgrading or scaling up, the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_host_subnet_length: hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
#osm_host_subnet_length=9
# Configure master API and console ports.
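
For reference, a sketch of the corresponding `networkConfig` stanza in master-config.yaml that these inventory variables must match on upgrades or scale-ups; the values below are the sane examples from the comments above, not taken from any particular cluster:

```yaml
# Illustrative master-config.yaml excerpt showing the fields named in the comments.
networkConfig:
  clusterNetworkCIDR: 10.128.0.0/14    # inventory: osm_cluster_network_cidr
  hostSubnetLength: 9                  # inventory: osm_host_subnet_length
  serviceNetworkCIDR: 172.30.0.0/16    # inventory: openshift_portal_net
```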
@@ -719,11 +729,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
-# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
-# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
-# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
-# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
+# in versions >= 3.6
#openshift_use_dnsmasq=False
+
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet or to set
# options like 'strict-order' to alter dnsmasq configuration.
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index b52806bc7..92a0927e5 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -34,17 +34,17 @@ openshift_deployment_type=openshift-enterprise
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.6
+openshift_release=v3.7
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.6.0
+#openshift_image_tag=v3.7.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.6.0
+#openshift_pkg_version=-3.7.0
# This enables all the system containers except for docker:
#openshift_use_system_containers=False
@@ -118,7 +118,7 @@ openshift_release=v3.6
# Force the registry to use for the container-engine/crio system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
+#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
#openshift_crio_systemcontainer_image_registry_override="registry.example.com"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
@@ -546,7 +546,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
# Configure the prefix and version for the component images
#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_metrics_deployer_version=3.6.0
+#openshift_hosted_metrics_deployer_version=3.7.0
#
# StorageClass
# openshift_storageclass_name=gp2
@@ -601,7 +601,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_logging_deployer_version=3.6.0
+#openshift_hosted_logging_deployer_version=3.7.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -621,6 +621,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
+# When upgrading or scaling up, the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_cluster_network_cidr: clusterNetworkCIDR
+# openshift_portal_net: serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -642,6 +648,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure number of bits to allocate to each host’s subnet e.g. 9
# would mean a /23 network on the host.
+# When upgrading or scaling up, the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_host_subnet_length: hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
#osm_host_subnet_length=9
# Configure master API and console ports.
@@ -727,10 +737,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
-# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
-# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
-# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
-# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
+# in versions >= 3.6
#openshift_use_dnsmasq=False
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet or to set
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 095f43dd8..3be13145e 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.125.0%{?dist}
+Release: 0.126.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -280,6 +280,47 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Sep 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.126.0
+- Fix rpm version logic for hosts (mgugino@redhat.com)
+- Revert back to hostnamectl and previous default of not setting hostname
+ (sdodson@redhat.com)
+- Correct include path to not follow symlink (rteague@redhat.com)
+- Fix include path for docker upgrade tasks (rteague@redhat.com)
+- Fix issue with etcd_common when using pre_upgrade tag (rteague@redhat.com)
+- inventory: Denote new required upgrade variables (smilner@redhat.com)
+- upgrade: Verify required network items are set (smilner@redhat.com)
+- ami build process calls openshift-node/config.yml (kwoodson@redhat.com)
+
+* Fri Sep 08 2017 Scott Dodson <sdodson@redhat.com> 3.7.0-0.125.1
+- Consolidating AWS roles and variables underneath openshift_aws role.
+ (kwoodson@redhat.com)
+- Fix README.md typo (mgugino@redhat.com)
+- Fixing variables and allowing custom ami. (kwoodson@redhat.com)
+- Remove openshift-common (mgugino@redhat.com)
+- Fix openshift_master_config_dir (sdodson@redhat.com)
+- remove experimental-cri flag from node config (sjenning@redhat.com)
+- cri-o: Split RHEL and CentOS images (smilner@redhat.com)
+- openshift_checks aos_version: also check installed under yum
+ (lmeyer@redhat.com)
+- Create ansible role for deploying prometheus on openshift (zgalor@redhat.com)
+- Fix: set openshift_master_config_dir to the correct value.
+ (mgugino@redhat.com)
+- Bump ansible requirement to 2.3 (sdodson@redhat.com)
+- Move master additional config out of base (rteague@redhat.com)
+- Import dnf only if importing yum fails (jhadvig@redhat.com)
+- output skopeo image check command (nakayamakenjiro@gmail.com)
+- skip openshift_cfme_nfs_server if not using nfs (sdw35@cornell.edu)
+- bug 1487573. Bump the allowed ES versions (jcantril@redhat.com)
+- update env in etcd.conf.j2 to reflect the latest naming (jchaloup@redhat.com)
+- logging set memory request to limit (jcantril@redhat.com)
+- Use the proper pod subnet instead the services one (edu@redhat.com)
+- elasticsearch: reintroduce readiness probe (jwozniak@redhat.com)
+- cri-o: add support for additional registries (gscrivan@redhat.com)
+- reverse order between router cert generation (mewt.fr@gmail.com)
+- ensured to always use a certificate for the router (mewt.fr@gmail.com)
+- Adding proxy env vars for dc/docker-registry (kwoodson@redhat.com)
+- oc_atomic_container: support Skopeo output (gscrivan@redhat.com)
+
* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.125.0
-
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 58b3a7835..5072d10fa 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -35,11 +35,9 @@
- /etc/dnsmasq.d/origin-upstream-dns.conf
- /etc/dnsmasq.d/openshift-ansible.conf
- /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
- when: openshift_use_dnsmasq | default(true) | bool
- service:
name: NetworkManager
state: restarted
- when: openshift_use_dnsmasq | default(true) | bool
- name: Stop services
service: name={{ item }} state=stopped
with_items:
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 0fb29ca06..2b3d4329e 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -32,91 +32,54 @@ Before any provisioning may occur, AWS account credentials must be present in th
### Let's Provision!
The newly added playbooks are the following:
-- build_ami.yml
-- provision.yml
-- provision_nodes.yml
+- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain OpenShift packages.
+- provision.yml - Create a vpc, elbs, security groups, launch config, asg's, etc.
+- install.yml - Calls the openshift-ansible installer on the newly created instances
+- provision_nodes.yml - Creates the infra and compute node scale groups
+- accept.yml - This is a playbook to accept infra and compute nodes into the cluster
+- provision_install.yml - This is a combination of the above playbooks (provision, install, provision_nodes, and accept).
-The current expected work flow should be to provide the `vars.yml` file with the
-desired settings for cluster instances. These settings are AWS specific and should
-be tailored to the consumer's AWS custom account settings.
+The current expected workflow is to provide an AMI with access to OpenShift repositories. A repository should be specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS-specific and should be tailored to the consumer's AWS custom account settings.
```yaml
-clusterid: mycluster
-region: us-east-1
-
-provision:
- clusterid: "{{ clusterid }}"
- region: "{{ region }}"
-
- build:
- base_image: ami-bdd5d6ab # base image for AMI to build from
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False
-
- # for s3 registry backend
- openshift_registry_s3: True
-
- # if using custom certificates these are required for the ELB
- iam_cert_ca:
- name: test_openshift
- cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
- key_path: '/path/to/wildcard.<clusterid>.example.com.key'
- chain_path: '/path/to/cert.ca.crt'
-
- instance_users:
- - key_name: myuser_key
- username: myuser
- pub_key: |
- ssh-rsa aaa<place public ssh key here>aaaaa user@<clusterid>
-
- node_group_config:
- tags:
- clusterid: "{{ clusterid }}"
- environment: stg
- ssh_key_name: myuser_key # name of the ssh key from above
-
- # configure master settings here
- master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- # Set the following number to be the same for masters.
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-...
- vpc:
- # name: mycluster # If missing; will default to clusterid
- cidr: 172.31.0.0/16
- subnets:
- us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
- - cidr: 172.31.48.0/20
- az: "us-east-1c"
- - cidr: 172.31.32.0/20
- az: "us-east-1e"
- - cidr: 172.31.16.0/20
- az: "us-east-1a"
-
+---
+# when creating an AMI set this to True
+# when installing a cluster set this to False
+openshift_node_bootstrap: True
+
+# specify a clusterid
+# openshift_aws_clusterid: default
+
+# specify a region
+# openshift_aws_region: us-east-1
+
+# must specify a base_ami when building an AMI
+# openshift_aws_base_ami: # base image for AMI to build from
+# specify when using a custom AMI
+# openshift_aws_ami:
+
+# when creating an encrypted AMI please specify use_encryption
+# openshift_aws_ami_encrypt: False
+
+# custom certificates are required for the ELB
+# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt'
+# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key'
+# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt'
+
+# This is required for any ec2 instances
+# openshift_aws_ssh_key_name: myuser_key
+
+# This will ensure these users are created
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
```
-Repeat the following setup for the infra and compute node groups. This most likely
- will not need editing but if the install requires further customization then these parameters
- can be updated.
-
-#### Step 1
+If customization is required for the instances, scale groups, or any other configurable option, please see ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for the available variables and overrides. These overrides can be placed in `provisioning_vars.yml`, the `inventory`, or `group_vars`.
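+
+As a minimal sketch, a few such overrides placed in `provisioning_vars.yml` might look like the following (these variable names appear elsewhere in this document; verify them against the role defaults linked above):
+
+```yaml
+# override the cluster id, region, and ssh key used by the openshift_aws role
+openshift_aws_clusterid: mycluster
+openshift_aws_region: us-west-2
+openshift_aws_ssh_key_name: myuser_key
+```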
-Create an openshift-ansible inventory file to use for a byo installation. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
+In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
```ini
[OSEv3:children]
@@ -133,6 +96,13 @@ etcd
################################################################################
# Ensure these variables are set for bootstrap
################################################################################
+# openshift_deployment_type is required for installation
+openshift_deployment_type=origin
+
+# required when building an AMI. This will
+# be dependent on the version provided by the yum repository
+openshift_pkg_version=-3.6.0
+
openshift_master_bootstrap_enabled=True
openshift_hosted_router_wait=False
@@ -153,77 +123,94 @@ openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo',
There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
-In order to create the bootstrapable AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles.
-
-
-#### Step 2
+#### Step 1
-Once the vars.yml file has been updated with the correct settings for the desired AWS account then we are ready to build an AMI.
+Once the `inventory` and the `provisioning_vars.yml` files have been updated with the correct settings for the desired AWS account, we are ready to build an AMI.
```
-$ ansible-playbook -i inventory.yml build_ami.yml
+$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml
```
1. This script will build a VPC. Default name will be clusterid if not specified.
2. Create an ssh key required for the instance.
-3. Create an instance.
-4. Run some setup roles to ensure packages and services are correctly configured.
-5. Create the AMI.
-6. If encryption is desired
+3. Create a security group.
+4. Create an instance using the key from step 2 or a specified key.
+5. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
+6. Create the AMI.
+7. If encryption is desired
- A KMS key is created with the name of $clusterid
- An encrypted AMI will be produced with $clusterid KMS key
-7. Terminate the instance used to configure the AMI.
+8. Terminate the instance used to configure the AMI.
+More AMI-specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption:
+```
+# openshift_aws_ami_encrypt: True # defaults to false
+```
-#### Step 3
+**Note**: This will take the recently created AMI and encrypt it so that it can be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date.
-Now that we have created an AMI for our Openshift installation, that AMI id needs to be placed in the `vars.yml` file. To do so update the following fields (The AMI can be captured from the output of the previous step or found in the ec2 console under AMIs):
+#### Step 2
+
+Now that we have created an AMI for our OpenShift installation, there are two ways to use the AMI.
+
+1. In the default behavior, the most recently created AMI is found and used.
+2. The `openshift_aws_ami` option can be specified. This allows the user to override the default behavior and use a custom AMI specified in the `openshift_aws_ami` variable, as sketched below.
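+
+A minimal sketch of pinning a custom AMI in `provisioning_vars.yml` (the AMI id below is a placeholder):
+
+```yaml
+# use a specific, previously built AMI instead of the most recently created one
+openshift_aws_ami: ami-0123456789abcdef0
+```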
+We are now ready to provision and install the cluster. This can be accomplished by calling all of the following steps at once or one-by-one. The all-in-one can be called like this:
```
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False # defaults to false
+$ ansible-playbook -i inventory.yml provision_install.yml -e @provisioning_vars.yml
```
-**Note**: If using encryption, specify with `use_encryption: True`. This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false. The AMI id will be fetched and used according to its most recent creation date.
+If this is the first time running through this process, please attempt the following steps one-by-one and ensure the setup works correctly.
+#### Step 3
-#### Step 4
-
-We are ready to create the master instances and install Openshift.
+We are ready to create the master instances.
```
-$ ansible-playbook -i <inventory from step 1> provision.yml
+$ ansible-playbook provision.yml -e @provisioning_vars.yml
```
This playbook runs through the following steps:
-1. Ensures a VPC is created
-2. Ensures a SSH key exists
-3. Creates an s3 bucket for the registry named $clusterid
-4. Create master security groups
-5. Create a master launch config
-6. Create the master auto scaling groups
-7. If certificates are desired for ELB, they will be uploaded
-8. Create internal and external master ELBs
-9. Add newly created masters to the correct groups
-10. Set a couple of important facts for the masters
-11. Run the [`byo`](../../common/openshift-cluster/config.yml)
+1. Ensures a VPC is created.
+2. Ensures an SSH key exists.
+3. Creates an S3 bucket for the registry named $clusterid-docker-registry.
+4. Creates the master security groups.
+5. Creates a master launch config.
+6. Creates the master auto scaling groups.
+7. If certificates are desired for the ELB, they will be uploaded.
+8. Creates internal and external master ELBs.
+9. Adds newly created masters to the correct groups.
+10. Sets a couple of important facts for the masters.
+
+At this point we have successfully created the infrastructure, including the master nodes.
-At this point we have created a successful cluster with only the master nodes.
+#### Step 4
+
+Now it is time to install OpenShift using the openshift-ansible installer. This can be achieved by running the following playbook:
+
+```
+$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
+```
+This playbook accomplishes the following:
+1. Builds a dynamic inventory file by querying AWS.
+2. Runs the [`byo`](../../common/openshift-cluster/config.yml) cluster configuration playbook.
+Once this playbook completes, the cluster masters should be installed and configured.
#### Step 5
-Now that we have a cluster deployed it might be more interesting to create some node types. This can be done easily with the following playbook:
+Now that we have a cluster deployed, we can create some node types. This can be done easily with the following playbook:
```
-$ ansible-playbook provision_nodes.yml
+$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml
```
Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator.
#### Step 6
-The registration of our nodes can be automated by running the following script `accept.yml`. This script can handle the registration in a few different ways.
+To facilitate the node registration process, nodes may be registered by running the `accept.yml` playbook. It can handle the registration in a few different ways.
- approve_all - **Note**: this option is for development and test environments. Security is bypassed
- nodes - A list of node names that will be accepted into the cluster
@@ -233,10 +220,11 @@ The registration of our nodes can be automated by running the following script `
nodes: < list of nodes here >
timeout: 0
```
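+
+For example, a minimal sketch of the `nodes` option with the placeholders filled in (the hostname below is illustrative; list the instances you expect to join, and supply these values however `accept.yml` expects them):
+
+```yaml
+# placeholder hostname; list the nodes that should be accepted
+nodes:
+  - ip-172-31-49-148.ec2.internal
+timeout: 0
+```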
+
Once the desired accept method is chosen, run the following playbook `accept.yml`:
1. Run the following playbook.
```
-$ ansible-playbook accept.yml
+$ ansible-playbook accept.yml -e @provisioning_vars.yml
```
Login to a master and run the following command:
@@ -263,6 +251,6 @@ ip-172-31-49-148.ec2.internal Ready 1h v1.6.1+5115d
At this point your cluster should be ready for workloads. Proceed to deploy applications on your cluster.
-### Still to compute
+### Still to come
There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities.
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index d43c84205..ffc367f9f 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -1,12 +1,17 @@
+#!/usr/bin/ansible-playbook
---
- name: Setup the vpc and the master node group
- #hosts: oo_first_master
hosts: localhost
remote_user: root
gather_facts: no
tasks:
- - name: get provisioning vars
- include_vars: vars.yml
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- name: bring lib_openshift into scope
include_role:
@@ -14,9 +19,9 @@
- name: fetch masters
ec2_remote_facts:
- region: "{{ provision.region }}"
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
- "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
"tag:host-type": master
instance-state-name: running
register: mastersout
@@ -26,9 +31,9 @@
- name: fetch new node instances
ec2_remote_facts:
- region: "{{ provision.region }}"
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
- "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
"tag:host-type": node
instance-state-name: running
register: instancesout
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index d27874200..fc11205d8 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -3,67 +3,49 @@
connection: local
gather_facts: no
tasks:
- - name: get the necessary vars for ami building
- include_vars: vars.yml
-
- - name: create a vpc with the name <clusterid>
+ - name: Require openshift_aws_base_ami
+ fail:
+ msg: "A base AMI is required for AMI building. Please ensure `openshift_aws_base_ami` is defined."
+ when: openshift_aws_base_ami is undefined
+
+ - name: "Alert user to variables needed and their values - {{ item.name }}"
+ debug:
+ msg: "{{ item.msg }}"
+ with_items:
+ - name: openshift_aws_clusterid
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+ - name: openshift_aws_region
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+ - name: create an instance and prepare for ami
include_role:
- name: openshift_aws_vpc
+ name: openshift_aws
+ tasks_from: build_ami.yml
vars:
- r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
- r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
- r_openshift_aws_vpc_region: "{{ provision.region }}"
- r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
- r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
+ openshift_aws_node_group_type: compute
- - name: create aws ssh keypair
- include_role:
- name: openshift_aws_ssh_keys
- vars:
- r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
- r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
-
- - name: fetch the default subnet id
- ec2_vpc_subnet_facts:
- region: "{{ provision.region }}"
+ - name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
- "tag:Name": "{{ provision.vpc.subnets[provision.region][0].az }}"
- register: subnetout
-
- - name: create instance for ami creation
- ec2:
- assign_public_ip: yes
- region: "{{ provision.region }}"
- key_name: "{{ provision.node_group_config.ssh_key_name }}"
- group: "{{ provision.clusterid }}"
- instance_type: m4.xlarge
- vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
- image: "{{ provision.build.base_image }}"
- volumes:
- - device_name: /dev/sdb
- volume_type: gp2
- volume_size: 100
- delete_on_termination: true
- wait: yes
- exact_count: 1
- count_tag:
- Name: ami_base
- instance_tags:
- Name: ami_base
- register: amibase
+ "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
- name: wait for ssh to become available
wait_for:
port: 22
- host: "{{ amibase.tagged_instances.0.public_ip }}"
+ host: "{{ instancesout.instances[0].public_ip_address }}"
timeout: 300
search_regex: OpenSSH
- name: add host to nodes
add_host:
groups: nodes
- name: "{{ amibase.tagged_instances.0.public_dns_name }}"
+ name: "{{ instancesout.instances[0].public_dns_name }}"
- name: set the user to perform installation
set_fact:
@@ -81,70 +63,16 @@
- name: run the std_include
include: ../../common/openshift-cluster/initialize_openshift_repos.yml
-- hosts: nodes
- remote_user: root
- tasks:
- - name: get the necessary vars for ami building
- include_vars: vars.yml
-
- - set_fact:
- openshift_node_bootstrap: True
-
- - name: run openshift image preparation
- include_role:
- name: openshift_node
+- name: install node config
+ include: ../../common/openshift-node/config.yml
- hosts: localhost
connection: local
become: no
tasks:
- - name: bundle ami
- ec2_ami:
- instance_id: "{{ amibase.tagged_instances.0.id }}"
- region: "{{ provision.region }}"
- state: present
- description: "This was provisioned {{ ansible_date_time.iso8601 }}"
- name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
- tags: "{{ provision.build.openshift_ami_tags }}"
- wait: yes
- register: amioutput
-
- - debug: var=amioutput
-
- - when: provision.build.use_encryption | default(False)
- block:
- - name: setup kms key for encryption
- include_role:
- name: openshift_aws_iam_kms
- vars:
- r_openshift_aws_iam_kms_region: "{{ provision.region }}"
- r_openshift_aws_iam_kms_alias: "alias/{{ provision.clusterid }}_kms"
-
- - name: augment the encrypted ami tags with source-ami
- set_fact:
- source_tag:
- source-ami: "{{ amioutput.image_id }}"
-
- - name: copy the ami for encrypted disks
- include_role:
- name: openshift_aws_ami_copy
- vars:
- r_openshift_aws_ami_copy_region: "{{ provision.region }}"
- r_openshift_aws_ami_copy_name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}-encrypted"
- r_openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
- r_openshift_aws_ami_copy_kms_alias: "alias/{{ provision.clusterid }}_kms"
- r_openshift_aws_ami_copy_tags: "{{ source_tag | combine(provision.build.openshift_ami_tags) }}"
- r_openshift_aws_ami_copy_encrypt: "{{ provision.build.use_encryption }}"
- # this option currently fails due to boto waiters
- # when supported this need to be reapplied
- #r_openshift_aws_ami_copy_wait: True
-
- - name: Display newly created encrypted ami id
- debug:
- msg: "{{ r_openshift_aws_ami_copy_retval_custom_ami }}"
-
- - name: terminate temporary instance
- ec2:
- state: absent
- region: "{{ provision.region }}"
- instance_ids: "{{ amibase.tagged_instances.0.id }}"
+ - name: seal the ami
+ include_role:
+ name: openshift_aws
+ tasks_from: seal_ami.yml
+ vars:
+ openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
diff --git a/playbooks/aws/openshift-cluster/build_node_group.yml b/playbooks/aws/openshift-cluster/build_node_group.yml
deleted file mode 100644
index 3ef492238..000000000
--- a/playbooks/aws/openshift-cluster/build_node_group.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: fetch recently created AMI
- ec2_ami_find:
- region: "{{ provision.region }}"
- sort: creationDate
- sort_order: descending
- name: "{{ provision.build.ami_name }}*"
- ami_tags: "{{ provision.build.openshift_ami_tags }}"
- #no_result_action: fail
- register: amiout
-
-- block:
- - name: "Create {{ openshift_build_node_type }} sgs"
- include_role:
- name: openshift_aws_sg
- vars:
- r_openshift_aws_sg_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_sg_region: "{{ provision.region }}"
- r_openshift_aws_sg_type: "{{ openshift_build_node_type }}"
-
- - name: "generate a launch config name for {{ openshift_build_node_type }}"
- set_fact:
- launch_config_name: "{{ provision.clusterid }}-{{ openshift_build_node_type }}-{{ ansible_date_time.epoch }}"
-
- - name: create "{{ openshift_build_node_type }} launch config"
- include_role:
- name: openshift_aws_launch_config
- vars:
- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
- r_openshift_aws_launch_config_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_launch_config_region: "{{ provision.region }}"
- r_openshift_aws_launch_config: "{{ provision.node_group_config }}"
- r_openshift_aws_launch_config_type: "{{ openshift_build_node_type }}"
- r_openshift_aws_launch_config_custom_image: "{{ '' if 'results' not in amiout else amiout.results[0].ami_id }}"
- r_openshift_aws_launch_config_bootstrap_token: "{{ (local_bootstrap['content'] |b64decode) if local_bootstrap is defined else '' }}"
-
- - name: "create {{ openshift_build_node_type }} node groups"
- include_role:
- name: openshift_aws_node_group
- vars:
- r_openshift_aws_node_group_name: "{{ provision.clusterid }} openshift {{ openshift_build_node_type }}"
- r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
- r_openshift_aws_node_group_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_node_group_region: "{{ provision.region }}"
- r_openshift_aws_node_group_config: "{{ provision.node_group_config }}"
- r_openshift_aws_node_group_type: "{{ openshift_build_node_type }}"
- r_openshift_aws_node_group_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
new file mode 100644
index 000000000..86d58a68e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -0,0 +1,74 @@
+---
+- name: Setup the vpc and the master node group
+ hosts: localhost
+ tasks:
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+ - name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+ - name: add new master to masters group
+ add_host:
+ groups: "masters,etcd,nodes"
+ name: "{{ item.public_ip_address }}"
+ hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}"
+ with_items: "{{ instancesout.instances }}"
+
+ - name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
+
+- name: set the master facts for hostname to elb
+ hosts: masters
+ gather_facts: no
+ remote_user: root
+ tasks:
+ - name: fetch elbs
+ ec2_elb_facts:
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
+ names:
+ - "{{ item }}"
+ with_items:
+ - "{{ openshift_aws_clusterid | default('default') }}-master-external"
+ - "{{ openshift_aws_clusterid | default('default') }}-master-internal"
+ delegate_to: localhost
+ register: elbs
+
+ - debug: var=elbs
+
+ - name: set fact
+ set_fact:
+ openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+ osm_custom_cors_origins:
+ - "{{ elbs.results[1].elbs[0].dns_name }}"
+ - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ with_items: "{{ groups['masters'] }}"
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/std_include.yml
+
+- name: run the config
+ include: ../../common/openshift-cluster/config.yml
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index dfbf61cc7..db7afac6f 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -2,156 +2,16 @@
- name: Setup the vpc and the master node group
hosts: localhost
tasks:
- - name: get provisioning vars
- include_vars: vars.yml
- - name: create default vpc
- include_role:
- name: openshift_aws_vpc
- vars:
- r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
- r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
- r_openshift_aws_vpc_region: "{{ provision.region }}"
- r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
- r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
-
- - name: create aws ssh keypair
- include_role:
- name: openshift_aws_ssh_keys
- vars:
- r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
- r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
-
- - when: provision.openshift_registry_s3 | default(false)
- name: create s3 bucket for registry
- include_role:
- name: openshift_aws_s3
- vars:
- r_openshift_aws_s3_clusterid: "{{ provision.clusterid }}-docker-registry"
- r_openshift_aws_s3_region: "{{ provision.region }}"
- r_openshift_aws_s3_mode: create
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
- - name: include scale group creation for master
- include: build_node_group.yml
- vars:
- openshift_build_node_type: master
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: fetch new master instances
- ec2_remote_facts:
- region: "{{ provision.region }}"
- filters:
- "tag:clusterid": "{{ provision.clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
-
- - name: bring iam_cert23 into scope
- include_role:
- name: lib_utils
-
- - name: upload certificates to AWS IAM
- iam_cert23:
- state: present
- name: "{{ provision.clusterid }}-master-external"
- cert: "{{ provision.iam_cert_ca.cert_path }}"
- key: "{{ provision.iam_cert_ca.key_path }}"
- cert_chain: "{{ provision.iam_cert_ca.chain_path | default(omit) }}"
- register: elb_cert_chain
- failed_when:
- - "'failed' in elb_cert_chain"
- - elb_cert_chain.failed
- - "'msg' in elb_cert_chain"
- - "'already exists' not in elb_cert_chain.msg"
- when: provision.iam_cert_ca is defined
-
- - debug: var=elb_cert_chain
-
- - name: create our master external and internal load balancers
+ - name: create default vpc
include_role:
- name: openshift_aws_elb
- vars:
- r_openshift_aws_elb_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_elb_region: "{{ provision.region }}"
- r_openshift_aws_elb_instance_filter:
- "tag:clusterid": "{{ provision.clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- r_openshift_aws_elb_type: master
- r_openshift_aws_elb_direction: "{{ elb_item }}"
- r_openshift_aws_elb_idle_timout: 400
- r_openshift_aws_elb_scheme: internet-facing
- r_openshift_aws_elb_security_groups:
- - "{{ provision.clusterid }}"
- - "{{ provision.clusterid }}_master"
- r_openshift_aws_elb_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
- r_openshift_aws_elb_name: "{{ provision.clusterid }}-master-{{ elb_item }}"
- r_openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
- with_items:
- - internal
- - external
- loop_control:
- loop_var: elb_item
-
- - name: add new master to masters group
- add_host:
- groups: "masters,etcd,nodes"
- name: "{{ item.public_ip_address }}"
- hostname: "{{ provision.clusterid }}-master-{{ item.id[:-5] }}"
- with_items: "{{ instancesout.instances }}"
-
- - name: set facts for group normalization
- set_fact:
- cluster_id: "{{ provision.clusterid }}"
- cluster_env: "{{ provision.node_group_config.tags.environment | default('dev') }}"
-
- - name: wait for ssh to become available
- wait_for:
- port: 22
- host: "{{ item.public_ip_address }}"
- timeout: 300
- search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
-
-
-- name: set the master facts for hostname to elb
- hosts: masters
- gather_facts: no
- remote_user: root
- tasks:
- - name: include vars
- include_vars: vars.yml
-
- - name: fetch elbs
- ec2_elb_facts:
- region: "{{ provision.region }}"
- names:
- - "{{ item }}"
- with_items:
- - "{{ provision.clusterid }}-master-external"
- - "{{ provision.clusterid }}-master-internal"
- delegate_to: localhost
- register: elbs
-
- - debug: var=elbs
-
- - name: set fact
- set_fact:
- openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
- osm_custom_cors_origins:
- - "{{ elbs.results[1].elbs[0].dns_name }}"
- - "console.{{ provision.clusterid }}.openshift.com"
- - "api.{{ provision.clusterid }}.openshift.com"
- with_items: "{{ groups['masters'] }}"
-
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/std_include.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+ name: openshift_aws
+ tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
new file mode 100644
index 000000000..e787deced
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -0,0 +1,16 @@
+---
+# Once an AMI is built then this script is used for
+# the one stop shop to provision and install a cluster
+# this playbook is run with the following parameters:
+# ansible-playbook -i openshift-ansible-inventory provision_install.yml
+- name: Include the provision.yml playbook to create cluster
+ include: provision.yml
+
+- name: Include the install.yml playbook to install cluster
+ include: install.yml
+
+- name: Include the provision_nodes.yml playbook to provision the infra and compute nodes
+ include: provision_nodes.yml
+
+- name: Include the accept.yml playbook to accept nodes into the cluster
+ include: accept.yml
diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml
index 5428fb307..44c686e08 100644
--- a/playbooks/aws/openshift-cluster/provision_nodes.yml
+++ b/playbooks/aws/openshift-cluster/provision_nodes.yml
@@ -1,47 +1,18 @@
---
-# Get bootstrap config token
-# bootstrap should be created on first master
-# need to fetch it and shove it into cloud data
- name: create the node scale groups
hosts: localhost
connection: local
gather_facts: yes
tasks:
- - name: get provisioning vars
- include_vars: vars.yml
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
- - name: fetch master instances
- ec2_remote_facts:
- region: "{{ provision.region }}"
- filters:
- "tag:clusterid": "{{ provision.clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: slurp down the bootstrap.kubeconfig
- slurp:
- src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
- register: bootstrap
-
- - name: set_fact on localhost for kubeconfig
- set_fact:
- local_bootstrap: "{{ bootstrap }}"
- launch_config_name:
- infra: "infra-{{ ansible_date_time.epoch }}"
- compute: "compute-{{ ansible_date_time.epoch }}"
-
- - name: include build node group
- include: build_node_group.yml
- vars:
- openshift_build_node_type: infra
-
- - name: include build node group
- include: build_node_group.yml
- vars:
- openshift_build_node_type: compute
+ - name: create the node groups
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
new file mode 100644
index 000000000..28eb9c993
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
@@ -0,0 +1,28 @@
+---
+# when creating an AMI set this option to True
+# when installing the cluster, set this to False
+openshift_node_bootstrap: True
+
+# specify a clusterid
+#openshift_aws_clusterid: default
+
+# must specify a base_ami when building an AMI
+#openshift_aws_base_ami:
+
+# when creating an encrypted AMI please specify use_encryption
+#openshift_aws_ami_encrypt: False
+
+# custom certificates are required for the ELB
+#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
+#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
+#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
+
+# This is required for any ec2 instances
+#openshift_aws_ssh_key_name: myuser_key
+
+# This will ensure these users are created
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
deleted file mode 100644
index 47da03cb7..000000000
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,113 +0,0 @@
----
-
-clusterid: mycluster
-region: us-east-1
-
-provision:
- clusterid: "{{ clusterid }}"
- region: "{{ region }}"
-
- build: # build specific variables here
- ami_name: "openshift-gi-"
- base_image: ami-bdd5d6ab # base image for AMI to build from
-
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False
-
- openshift_ami_tags:
- bootstrap: "true"
- openshift-created: "true"
- clusterid: "{{ clusterid }}"
-
- # Use s3 backed registry storage
- openshift_registry_s3: True
-
- # if using custom certificates these are required for the ELB
- iam_cert_ca:
- name: "{{ clusterid }}_openshift"
- cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
- key_path: '/path/to/wildcard.<clusterid>.example.com.key'
- chain_path: '/path/to/cert.ca.crt'
-
- instance_users:
- - key_name: myuser_key
- username: myuser
- pub_key: |
- ssh-rsa AAAA== myuser@system
-
- node_group_config:
- tags:
- clusterid: "{{ clusterid }}"
- environment: stg
-
- ssh_key_name: myuser_key
-
- # master specific cluster node settings
- master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-
- # compute specific cluster node settings
- compute:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 100
- desired_size: 3
- tags:
- host-type: node
- sub-host-type: compute
-
- # infra specific cluster node settings
- infra:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 2
- max_size: 20
- desired_size: 2
- tags:
- host-type: node
- sub-host-type: infra
-
- # vpc settings
- vpc:
- cidr: 172.31.0.0/16
- subnets:
- us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
- - cidr: 172.31.48.0/20
- az: "us-east-1c"
- - cidr: 172.31.32.0/20
- az: "us-east-1e"
- - cidr: 172.31.16.0/20
- az: "us-east-1a"
diff --git a/playbooks/byo/openshift-checks/roles b/playbooks/byo/openshift-checks/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-checks/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index acf5469bf..60fa44c5b 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -9,6 +9,4 @@
- include: ../../common/openshift-cluster/config.yml
vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index bbec3a4c2..a523bb47f 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -13,6 +13,3 @@
- always
- include: ../../common/openshift-cluster/openshift_logging.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
new file mode 100644
index 000000000..15917078d
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
@@ -0,0 +1,4 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/openshift_prometheus.yml
diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml
index 6f95b4e2d..40a7606e7 100644
--- a/playbooks/byo/openshift-cluster/service-catalog.yml
+++ b/playbooks/byo/openshift-cluster/service-catalog.yml
@@ -13,6 +13,3 @@
- always
- include: ../../common/openshift-cluster/service_catalog.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-loadbalancer/config.yml b/playbooks/byo/openshift-loadbalancer/config.yml
new file mode 100644
index 000000000..32c828f97
--- /dev/null
+++ b/playbooks/byo/openshift-loadbalancer/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-loadbalancer/config.yml
diff --git a/playbooks/byo/openshift-master/additional_config.yml b/playbooks/byo/openshift-master/additional_config.yml
new file mode 100644
index 000000000..b3d7b5731
--- /dev/null
+++ b/playbooks/byo/openshift-master/additional_config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-master/additional_config.yml
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index e3ef704e5..a09edd55a 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -1,7 +1,7 @@
---
- include: ../openshift-cluster/initialize_groups.yml
-- name: Ensure there are new_masters
+- name: Ensure there are new_masters or new_nodes
hosts: localhost
connection: local
become: no
@@ -13,11 +13,8 @@
add hosts to the new_masters and new_nodes host groups to add
masters.
when:
- - (g_new_master_hosts | default([]) | length == 0) or (g_new_node_hosts | default([]) | length == 0)
+ - (g_new_master_hosts | default([]) | length == 0) and (g_new_node_hosts | default([]) | length == 0)
- include: ../../common/openshift-cluster/std_include.yml
- include: ../../common/openshift-master/scaleup.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-nfs/config.yml b/playbooks/byo/openshift-nfs/config.yml
new file mode 100644
index 000000000..93b24411e
--- /dev/null
+++ b/playbooks/byo/openshift-nfs/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-nfs/config.yml
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 5f420a76c..bbd5a0185 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -22,6 +22,15 @@
tags:
- always
+- name: Set hostname
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ tasks:
+ # TODO: switch back to hostname module once we depend on ansible-2.4
+ # https://github.com/ansible/ansible/pull/25906
+ - name: Set hostname
+ command: "hostnamectl set-hostname {{ openshift.common.hostname }}"
+ when: openshift_set_hostname | default(false,true) | bool
+
- include: ../openshift-etcd/config.yml
- include: ../openshift-nfs/config.yml
@@ -34,6 +43,8 @@
- include: ../openshift-master/config.yml
+- include: ../openshift-master/additional_config.yml
+
- include: ../openshift-node/config.yml
tags:
- node
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 50351588f..be14b06f0 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -27,9 +27,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: master
local_facts:
dns_port: '8053'
@@ -50,9 +47,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: node
local_facts:
dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c9f37109b..e55b2f964 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -43,11 +43,15 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- No etcd hosts defined. Running an all-in-one master is deprecated and
- will no longer be supported in a future upgrade.
+ Running etcd as an embedded service is no longer supported. If this is a
+ new install please define an 'etcd' group with either one or three
+ hosts. These hosts may be the same hosts as your masters. If this is an
+ upgrade you may set openshift_master_unsupported_embedded_etcd=true
+ until a migration playbook becomes available.
when:
- - g_etcd_hosts | default([]) | length == 0
- - not openshift_master_unsupported_all_in_one | default(False)
+ - g_etcd_hosts | default([]) | length not in [3,1]
+ - not openshift_master_unsupported_embedded_etcd | default(False)
+ - not openshift_node_bootstrap | default(False)
- name: Evaluate oo_all_hosts
add_host:
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 9eaf3bc34..0723575c2 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -126,11 +126,9 @@
openshift_facts:
role: common
local_facts:
- debug_level: "{{ openshift_debug_level | default(2) }}"
deployment_type: "{{ openshift_deployment_type }}"
deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
cli_image: "{{ osm_image | default(None) }}"
- cluster_id: "{{ openshift_cluster_id | default('default') }}"
hostname: "{{ openshift_hostname | default(None) }}"
ip: "{{ openshift_ip | default(None) }}"
is_containerized: "{{ l_is_containerized | default(None) }}"
@@ -148,8 +146,6 @@
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
- sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
- use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
- name: initialize_facts set_fact repoquery command
set_fact:
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 7112a6084..1b186f181 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,4 +1,12 @@
---
+- name: Set version_install_base_package true on masters and nodes
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ tasks:
+ - name: Set version_install_base_package true
+ set_fact:
+ version_install_base_package: True
+ when: version_install_base_package is not defined
+
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
@@ -12,5 +20,10 @@
hosts: oo_all_hosts:!oo_first_master
vars:
openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+ pre_tasks:
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
+ when: openshift_pkg_version is not defined
+ - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
roles:
- openshift_version
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 99a634970..75339f6df 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -29,7 +29,6 @@
- role: openshift_default_storage_class
when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- role: openshift_hosted
- r_openshift_hosted_use_calico: "{{ openshift.common.use_calico | default(false) | bool }}"
- role: openshift_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
- role: openshift_logging
@@ -49,6 +48,9 @@
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ - role: openshift_prometheus
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
- name: Update master-config for publicLoggingURL
hosts: oo_masters_to_config:!oo_first_master
tags:
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
new file mode 100644
index 000000000..a979c0c00
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -0,0 +1,9 @@
+---
+- include: std_include.yml
+
+- name: OpenShift Prometheus
+ hosts: oo_first_master
+ roles:
+ - openshift_prometheus
+ vars:
+ openshift_prometheus_state: present
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
new file mode 100644
index 000000000..26716a92d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/sanity_checks.yml
@@ -0,0 +1,51 @@
+---
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ tasks:
+ - fail:
+ msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with nuage
+ when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
+
+ - fail:
+ msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: openshift_hostname must be 63 characters or less
+ when: openshift_hostname is defined and openshift_hostname | length > 63
+
+ - fail:
+ msg: openshift_public_hostname must be 63 characters or less
+ when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 6cc56889a..cef0072f3 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -7,6 +7,10 @@
tags:
- always
+- include: sanity_checks.yml
+ tags:
+ - always
+
- include: validate_hostnames.yml
tags:
- node
diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..eb118365a 100644
--- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate etcd instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: etcd_names_output
with_sequence: count={{ num_etcd }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..783f70f50 100644
--- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate master instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: master_names_output
with_sequence: count={{ num_masters }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..c103e40a9 100644
--- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
@@ -5,7 +5,7 @@
- name: Generate node instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
register: node_names_output
with_sequence: count={{ number_nodes }}
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 7cc13137f..98953f72e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_nodes_to_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index b2a2eac9a..52345a9ba 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -18,12 +18,16 @@
- name: Get current version of Docker
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Get latest available version of Docker
command: >
{{ repoquery_cmd }} --qf '%{version}' "docker"
register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
when: pkg_check.rc == 0
failed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 616ba04f8..2cc6c9019 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -2,7 +2,7 @@
- name: Backup etcd
hosts: oo_etcd_hosts_to_backup
roles:
- - role: openshift_facts
+ - role: openshift_etcd_facts
- role: etcd_common
r_etcd_common_action: backup
r_etcd_common_backup_tag: etcd_backup_tag
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 0f421928b..c98065cf4 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_oo_option_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 9d8b73cff..6d8503879 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -1,8 +1,10 @@
---
# Only check if docker upgrade is required if docker_upgrade is not
# already set to False.
-- include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+- include: ../../docker/upgrade_check.yml
+ when:
+ - docker_upgrade is not defined or (docker_upgrade | bool)
+ - not (openshift.common.is_atomic | bool)
# Additional checks for Atomic hosts:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 18f10437d..b75aae589 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -13,11 +13,11 @@
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
register: l_pb_upgrade_control_plane_pre_upgrade_storage
- when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
+ when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
+ - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool
+ - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
@@ -151,11 +151,11 @@
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=clusterpolicies --confirm
register: l_pb_upgrade_control_plane_post_upgrade_storage
- when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool
+ - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
run_once: true
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -247,11 +247,11 @@
migrate storage --include=* --confirm
run_once: true
register: l_pb_upgrade_control_plane_post_upgrade_storage
- when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool
+ - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
- set_fact:
reconcile_complete: True
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index a2af7bb21..e4ab0aa41 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -69,7 +69,7 @@
- role: etcd_migrate
r_etcd_migrate_action: migrate
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
+ etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
@@ -80,7 +80,7 @@
- role: etcd_migrate
r_etcd_migrate_action: clean_data
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
+ etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
post_tasks:
@@ -115,7 +115,7 @@
roles:
- role: etcd_migrate
r_etcd_migrate_action: add_ttls
- etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].ansible_default_ipv4.address }}"
+ etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
when: etcd_migration_failed | length == 0
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index 5f8bb1c7a..d3fa48bad 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -23,6 +23,9 @@
-C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
delegate_to: "{{ etcd_ca_host }}"
+ failed_when:
+ - etcd_add_check.rc == 1
+ - ("peerURL exists" not in etcd_add_check.stderr)
register: etcd_add_check
retries: 3
delay: 10
@@ -53,3 +56,19 @@
retries: 3
delay: 30
until: scaleup_health.rc == 0
+
+- name: Update master etcd client urls
+ hosts: oo_masters_to_config
+ serial: 1
+ tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: update_etcd_client_urls
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index c0ea93d2c..7468c78f0 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -11,13 +11,13 @@
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- role: openshift_examples
registry_url: "{{ openshift.master.registry_url }}"
- when: openshift.common.install_examples | bool
+ when: openshift_install_examples | default(True)
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
- when: openshift.common.use_manageiq | bool
+ when: openshift_use_manageiq | default(false) | bool
- role: cockpit
when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
(osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
- role: flannel_register
- when: openshift.common.use_flannel | bool
+ when: openshift_use_flannel | default(false) | bool
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index b29b9ef4f..e1b9a4964 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -179,28 +179,36 @@
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.ip') | default([]) | join(',')
- }}"
- roles:
- - role: os_firewall
- - role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.ip') | default([]) | join(',')
+ }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_master_facts
+ - role: openshift_hosted_facts
+ - role: openshift_master_certificates
+ - role: openshift_etcd_client_certificates
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ when: groups.oo_etcd_to_config | default([]) | length != 0
+ - role: openshift_clock
+ - role: openshift_cloud_provider
+ - role: openshift_builddefaults
+ - role: openshift_buildoverrides
+ - role: nickhammond.logrotate
+ - role: contiv
+ contiv_role: netmaster
+ when: openshift_use_contiv | default(False) | bool
+ - role: openshift_master
+ openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
@@ -208,18 +216,14 @@
openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
- role: nuage_master
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: calico_master
- when: openshift.common.use_calico | bool
-
+ when: openshift_use_calico | default(false) | bool
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
-- include: additional_config.yml
- when: not g_openshift_master_is_scaleup
-
- name: Re-enable excluder if it was previously enabled
hosts: oo_masters_to_config
gather_facts: no
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 6fec346c3..4d73b8124 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -7,7 +7,7 @@
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
handlers:
- - include: roles/openshift_master/handlers/main.yml
+ - include: ../../../roles/openshift_master/handlers/main.yml
static: yes
roles:
- openshift_facts
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 000e46e80..64ea0d3c4 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -2,5 +2,5 @@
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: openshift_facts
+ - role: os_firewall
- role: openshift_storage_nfs
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index c13417714..0801c41ff 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -71,17 +71,18 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- when: openshift.common.use_flannel | bool
+ when: openshift_use_flannel | default(false) | bool
- role: calico
- when: openshift.common.use_calico | bool
+ when: openshift_use_calico | default(false) | bool
- role: nuage_node
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: contiv
contiv_role: netplugin
- when: openshift.common.use_contiv | bool
+ when: openshift_use_contiv | default(false) | bool
- role: nickhammond.logrotate
- role: openshift_manage_node
openshift_master_host: "{{ groups.oo_first_master.0 }}"
+ when: not openshift_node_bootstrap | default(False)
tasks:
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
new file mode 100644
index 000000000..a3d1d46a6
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/provision.yml
@@ -0,0 +1,19 @@
+---
+- name: Ensure all cloud resources necessary for the cluster, including instances, have been started
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+
+ - name: provision a GCP cluster in the specified project
+ include_role:
+ name: openshift_gcp
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/std_include.yml
+
+- name: run the config
+ include: ../../common/openshift-cluster/config.yml
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index ed97d539c..7e206ded1 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1 +1,6 @@
---
+docker_cli_auth_config_path: '/root/.docker'
+
+oreg_url: ''
+oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+oreg_auth_credentials_replace: False
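The new `oreg_host` default derives a registry hostname from `oreg_url` only when the first path segment looks like a hostname (contains a dot). A minimal sketch of both cases, using hypothetical registry values:

```yaml
# Illustrative values only; the expression is the same one added above.
# Case 1: the first segment contains a dot, so oreg_host becomes the hostname.
oreg_url: 'registry.example.com/openshift3/ose'
oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
# -> oreg_host evaluates to 'registry.example.com'

# Case 2: no dot in the first segment (e.g. oreg_url: 'openshift3/ose'),
# so oreg_host evaluates to ''.
```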
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index bc52ab60c..16aea5067 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -3,6 +3,8 @@
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Error out if Docker pre-installed but too old
@@ -117,6 +119,18 @@
notify:
- restart docker
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ docker_cli_auth_config_path }}/config.json"
+ when: oreg_auth_user is defined
+ register: docker_cli_auth_credentials_stat
+
+- name: Create credentials for docker cli registry auth
+ command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+
- name: Start the Docker service
systemd:
name: docker
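The two auth tasks above run only when `oreg_auth_user` is defined, and the `docker login` is skipped when a credentials file already exists unless `oreg_auth_credentials_replace` is set. A hedged inventory sketch of the variables they consume; the registry and credentials are placeholders:

```yaml
# Placeholder values for illustration only.
oreg_url: 'registry.example.com/openshift3/ose'   # oreg_host is derived from this
oreg_auth_user: 'service-account'
oreg_auth_password: 'not-a-real-password'
oreg_auth_credentials_replace: True               # re-run docker login even if config.json exists
```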
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 787f51f94..0bab0899c 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -3,6 +3,15 @@
- set_fact:
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(openshift.docker.insecure_registries)) }}"
when: openshift.docker.insecure_registries
+- set_fact:
+ l_crio_registries: "{{ openshift.docker.additional_registries + ['docker.io'] }}"
+ when: openshift.docker.additional_registries
+- set_fact:
+ l_crio_registries: "{{ ['docker.io'] }}"
+ when: not openshift.docker.additional_registries
+- set_fact:
+ l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
+ when: openshift.docker.additional_registries
- name: Ensure container-selinux is installed
package:
@@ -86,12 +95,18 @@
- name: Set to default prepend
set_fact:
l_crio_image_prepend: "docker.io/gscrivano"
- l_crio_image_name: "crio-o-fedora"
+ l_crio_image_name: "cri-o-fedora"
- - name: Use Centos based image when distribution is Red Hat or CentOS
+ - name: Use Centos based image when distribution is CentOS
set_fact:
l_crio_image_name: "cri-o-centos"
- when: ansible_distribution in ['RedHat', 'CentOS']
+ when: ansible_distribution == "CentOS"
+
+ - name: Use RHEL based image when distribution is Red Hat
+ set_fact:
+ l_crio_image_prepend: "registry.access.redhat.com"
+ l_crio_image_name: "cri-o"
+ when: ansible_distribution == "RedHat"
# For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- name: Use a testing registry if requested
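The new `set_fact` tasks always append `docker.io` to any additional registries and build a quoted, comma-separated string for the crio.conf template (see the `registries = [...]` block added later in this patch). Assuming a single hypothetical extra registry, the resulting facts would look like:

```yaml
# Illustrative result when openshift.docker.additional_registries is
# ['registry.example.com']:
l_crio_registries:
- registry.example.com
- docker.io
l_additional_crio_registries: '"registry.example.com", "docker.io"'
```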
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 57a84bc2c..146e5f430 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -100,18 +100,22 @@
l_docker_image_prepend: "registry.fedoraproject.org/f25"
when: ansible_distribution == 'Fedora'
- # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- - name: Use a testing registry if requested
- set_fact:
- l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
- when:
- - openshift_docker_systemcontainer_image_registry_override is defined
- - openshift_docker_systemcontainer_image_registry_override != ""
-
- name: Set the full image name
set_fact:
l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+ # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
+ - name: Use a specific image if requested
+ set_fact:
+ l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
+ when:
+ - openshift_docker_systemcontainer_image_override is defined
+ - openshift_docker_systemcontainer_image_override != ""
+
+ # Be nice and let the user see the variable result
+ - debug:
+ var: l_docker_image
+
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
command: "atomic pull --storage ostree {{ l_docker_image }}"
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index 5b31932b1..b4ee84fd0 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -120,6 +120,11 @@ insecure_registries = [
{{ l_insecure_crio_registries|default("") }}
]
+# registries is used to specify a comma separated list of registries to be used
+# when pulling an unqualified image (e.g. fedora:rawhide).
+registries = [
+{{ l_additional_crio_registries|default("") }}
+]
# The "crio.network" table contains settings pertaining to the
# management of CNI plugins.
[crio.network]
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 89993f7ea..b67411f40 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -56,7 +56,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 2bc486d3f..42d27c081 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -29,7 +29,6 @@
- name: Check current etcd disk usage
shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
register: l_etcd_disk_usage
- when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -37,9 +36,9 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_etcd_disk_usage.stdout|int*2 }} Kb disk space required for etcd backup,
{{ l_avail_disk.stdout }} Kb available.
- when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
+ when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
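With the embedded-etcd condition removed, the disk-usage check now runs on every backup host and requires free space of at least twice the current data-dir size. An illustrative equivalent of the updated condition, assuming `du` reports 500000 KB used (so at least 1000000 KB must be free):

```yaml
# Illustrative restatement of the check above, not part of the patch.
- assert:
    that:
      - l_etcd_disk_usage.stdout|int * 2 <= l_avail_disk.stdout|int
    msg: "Insufficient disk space for etcd backup"
```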
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
index f5bcd03ee..6ed87e6c7 100644
--- a/roles/etcd_common/tasks/main.yml
+++ b/roles/etcd_common/tasks/main.yml
@@ -6,4 +6,4 @@
- name: Include main action task file
include: "{{ r_etcd_common_action }}.yml"
- when: '"noop" not in r_etcd_common_action'
+ when: r_etcd_common_action != "noop"
diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml
new file mode 100644
index 000000000..a88d78235
--- /dev/null
+++ b/roles/etcd_common/tasks/noop.yml
@@ -0,0 +1,4 @@
+---
+# This file is here because the usage of tags, specifically `pre_upgrade`,
+# breaks the functionality of this role.
+# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index ddf8230ec..71c8f38c3 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,7 +1,6 @@
---
-flannel_network: "{{ openshift.common.portal_net | default('172.30.0.0/16', true) }}"
-flannel_min_network: 172.30.5.0
-flannel_subnet_len: 24
+flannel_network: "{{ openshift.master.sdn_cluster_network_cidr }}"
+flannel_subnet_len: "{{ 32 - openshift.master.sdn_host_subnet_length }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/flannel_register/templates/flannel-config.json b/roles/flannel_register/templates/flannel-config.json
index 89ce4c30b..bba3729fa 100644
--- a/roles/flannel_register/templates/flannel-config.json
+++ b/roles/flannel_register/templates/flannel-config.json
@@ -1,7 +1,6 @@
{
"Network": "{{ flannel_network }}",
"SubnetLen": {{ flannel_subnet_len }},
- "SubnetMin": "{{ flannel_min_network }}",
"Backend": {
"Type": "host-gw"
}
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index 95a305b58..e5ac1f74f 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -618,17 +619,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
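The module now retries `Repoquery.run_ansible` itself rather than failing on the first transient repository error. A hypothetical task exercising the new parameters; `retries` and `retry_interval` match the argument_spec above, while the package name is just an example:

```yaml
# Hypothetical usage of the new module-level retry knobs.
- repoquery:
    name: docker
    show_duplicates: True
    retries: 6          # default 4
    retry_interval: 10  # seconds between attempts, default 5
  register: docker_versions
```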
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index baf72fe47..921bca074 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
index 40773b1c1..5f5b93639 100644
--- a/roles/lib_utils/src/ansible/repoquery.py
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -19,17 +19,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index 567f8c9e0..07a04b7ae 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -10,6 +10,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
new file mode 100644
index 000000000..696efbea5
--- /dev/null
+++ b/roles/openshift_aws/README.md
@@ -0,0 +1,84 @@
+openshift_aws
+==================================
+
+Provision AWS infrastructure for OpenShift clusters: VPCs, security groups, S3 buckets, ELBs, launch configurations, scale groups, and AMIs.
+
+Requirements
+------------
+
+* Ansible 2.3
+* Boto
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value
+|---------------------------------------------------|-----------------------
+| openshift_aws_clusterid | default
+| openshift_aws_elb_scheme | internet-facing
+| openshift_aws_launch_config_bootstrap_token | ''
+| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
+| openshift_aws_ami_copy_wait | False
+| openshift_aws_users | []
+| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_create_vpc | False
+| openshift_aws_node_group_type | master
+| openshift_aws_elb_cert_arn | ''
+| openshift_aws_kubernetes_cluster_status | owned
+| openshift_aws_s3_mode | create
+| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_ssh_keys | False
+| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms
+| openshift_aws_use_custom_ami | False
+| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }}
+| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
+| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
+| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
+| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_security_groups | False
+| openshift_aws_create_iam_cert | False
+| openshift_aws_create_scale_group | True
+| openshift_aws_ami_encrypt | False
+| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
+| openshift_aws_region | us-east-1
+| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_elb_idle_timout | 400
+| openshift_aws_subnet_name | us-east-1c
+| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
+| openshift_aws_create_launch_config | True
+| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
+| openshift_aws_ami_name | openshift-gi
+| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_vpc_name | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
+|
+
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+```yaml
+- include_role:
+ name: openshift_aws
+ tasks_from: vpc.yml
+ vars:
+ openshift_aws_clusterid: test
+ openshift_aws_region: us-east-1
+ openshift_aws_create_vpc: true
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
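Beyond the VPC example above, the same `include_role`/`tasks_from` pattern can drive the role's other task files. A hedged sketch of invoking the full provisioning flow; the cluster id and region are placeholders:

```yaml
- include_role:
    name: openshift_aws
    tasks_from: provision.yml
  vars:
    openshift_aws_clusterid: test
    openshift_aws_region: us-east-1
```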
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
new file mode 100644
index 000000000..4e7f54f79
--- /dev/null
+++ b/roles/openshift_aws/defaults/main.yml
@@ -0,0 +1,209 @@
+---
+openshift_aws_create_vpc: True
+openshift_aws_create_s3: True
+openshift_aws_create_iam_cert: True
+openshift_aws_create_security_groups: True
+openshift_aws_create_launch_config: True
+openshift_aws_create_scale_group: True
+openshift_aws_kubernetes_cluster_status: owned # or shared
+openshift_aws_node_group_type: master
+
+openshift_aws_wait_for_ssh: True
+
+openshift_aws_clusterid: default
+openshift_aws_region: us-east-1
+openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
+openshift_aws_iam_cert_path: ''
+openshift_aws_iam_cert_chain_path: ''
+openshift_aws_iam_cert_key_path: ''
+openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}"
+
+openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+openshift_aws_ami: ''
+openshift_aws_ami_copy_wait: False
+openshift_aws_ami_encrypt: False
+openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}"
+openshift_aws_ami_name: openshift-gi
+openshift_aws_base_ami_name: ami_base
+
+openshift_aws_launch_config_bootstrap_token: ''
+openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}"
+
+openshift_aws_users: []
+
+openshift_aws_ami_tags:
+ bootstrap: "true"
+ openshift-created: "true"
+ clusterid: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_s3_mode: create
+openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
+
+openshift_aws_elb_health_check:
+ ping_protocol: tcp
+ ping_port: 443
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+
+openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_idle_timout: 400
+openshift_aws_elb_scheme: internet-facing
+openshift_aws_elb_cert_arn: ''
+
+openshift_aws_elb_listeners:
+ master:
+ external:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: ssl
+ instance_port: 443
+ - protocol: ssl
+ load_balancer_port: 443
+ instance_protocol: ssl
+ instance_port: 443
+ # ssl certificate required for https or ssl
+ ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+ internal:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 80
+ - protocol: tcp
+ load_balancer_port: 443
+ instance_protocol: tcp
+ instance_port: 443
+
+openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
+
+openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
+
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+
+openshift_aws_node_group_config:
+ tags: "{{ openshift_aws_node_group_config_tags }}"
+ master:
+ instance_type: m4.xlarge
+ ami: "{{ openshift_aws_ami }}"
+ volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 3
+ desired_size: 3
+ tags:
+ host-type: master
+ sub-host-type: default
+ wait_for_instances: True
+ compute:
+ instance_type: m4.xlarge
+ ami: "{{ openshift_aws_ami }}"
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 100
+ desired_size: 3
+ tags:
+ host-type: node
+ sub-host-type: compute
+ infra:
+ instance_type: m4.xlarge
+ ami: "{{ openshift_aws_ami }}"
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 2
+ max_size: 20
+ desired_size: 2
+ tags:
+ host-type: node
+ sub-host-type: infra
+
+openshift_aws_elb_security_groups:
+- "{{ openshift_aws_clusterid }}"
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+
+openshift_aws_elb_instance_filter:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": "{{ openshift_aws_node_group_type }}"
+ instance-state-name: running
+
+openshift_aws_node_security_groups:
+ default:
+ name: "{{ openshift_aws_clusterid }}"
+ desc: "{{ openshift_aws_clusterid }} default"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: all
+ from_port: all
+ to_port: all
+ group_name: "{{ openshift_aws_clusterid }}"
+ master:
+ name: "{{ openshift_aws_clusterid }}_master"
+ desc: "{{ openshift_aws_clusterid }} master instances"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ cidr_ip: 0.0.0.0/0
+ compute:
+ name: "{{ openshift_aws_clusterid }}_compute"
+ desc: "{{ openshift_aws_clusterid }} compute node instances"
+ infra:
+ name: "{{ openshift_aws_clusterid }}_infra"
+ desc: "{{ openshift_aws_clusterid }} infra node instances"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 30000
+ to_port: 32000
+ cidr_ip: 0.0.0.0/0
+ etcd:
+ name: "{{ openshift_aws_clusterid }}_etcd"
+ desc: "{{ openshift_aws_clusterid }} etcd instances"
+
+openshift_aws_vpc_tags:
+ Name: "{{ openshift_aws_vpc_name }}"
+
+openshift_aws_subnet_name: us-east-1c
+
+openshift_aws_vpc:
+ name: "{{ openshift_aws_vpc_name }}"
+ cidr: 172.31.0.0/16
+ subnets:
+ us-east-1:
+ - cidr: 172.31.48.0/20
+ az: "us-east-1c"
+ - cidr: 172.31.32.0/20
+ az: "us-east-1e"
+ - cidr: 172.31.16.0/20
+ az: "us-east-1a"
diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/filters.py
new file mode 100644
index 000000000..06e1f9602
--- /dev/null
+++ b/roles/openshift_aws/filter_plugins/filters.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use in openshift_aws
+'''
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use by openshift_aws role'''
+
+ @staticmethod
+ def build_instance_tags(clusterid, status='owned'):
+ ''' This function will return a dictionary of the instance tags.
+
+ The main desire to have this inside of a filter_plugin is that we
+ need to build the following key.
+
+ {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+
+ '''
+ tags = {'clusterid': clusterid,
+ 'kubernetes.io/cluster/{}'.format(clusterid): status}
+
+ return tags
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {'build_instance_tags': self.build_instance_tags}
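The filter is consumed by `openshift_aws_node_group_config_tags` in the role defaults, but it can also be exercised directly. A small example with a hypothetical cluster id:

```yaml
# Hypothetical debug task; 'mycluster' is a placeholder cluster id.
# Expected result:
#   {'clusterid': 'mycluster', 'kubernetes.io/cluster/mycluster': 'owned'}
- debug:
    msg: "{{ 'mycluster' | build_instance_tags('owned') }}"
```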
diff --git a/roles/openshift_aws/meta/main.yml b/roles/openshift_aws/meta/main.yml
new file mode 100644
index 000000000..875efcb8f
--- /dev/null
+++ b/roles/openshift_aws/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- lib_utils
diff --git a/roles/openshift_aws/tasks/ami_copy.yml b/roles/openshift_aws/tasks/ami_copy.yml
new file mode 100644
index 000000000..07020dd62
--- /dev/null
+++ b/roles/openshift_aws/tasks/ami_copy.yml
@@ -0,0 +1,34 @@
+---
+- fail:
+ msg: "{{ item }} needs to be defined"
+ when: item is not defined
+ with_items:
+ - openshift_aws_ami_copy_src_ami
+ - openshift_aws_ami_copy_name
+
+- name: Create IAM KMS key with alias
+ oo_iam_kms:
+ state: present
+ alias: "{{ openshift_aws_iam_kms_alias }}"
+ region: "{{ openshift_aws_region }}"
+ register: created_kms
+
+- debug: var=created_kms.results
+
+- name: "Create copied AMI image and wait: {{ openshift_aws_ami_copy_wait }}"
+ ec2_ami_copy:
+ name: "{{ openshift_aws_ami_copy_name }}"
+ region: "{{ openshift_aws_region }}"
+ source_region: "{{ openshift_aws_ami_copy_src_region }}"
+ source_image_id: "{{ openshift_aws_ami_copy_src_ami }}"
+ encrypted: "{{ openshift_aws_ami_encrypt | bool }}"
+ kms_key_id: "{{ created_kms.results.KeyArn | default(omit) }}"
+ wait: "{{ openshift_aws_ami_copy_wait | default(omit) }}"
+ tags: "{{ openshift_aws_ami_tags }}"
+ register: copy_result
+
+- debug: var=copy_result
+
+- name: return AMI ID with setfact
+ set_fact:
+ openshift_aws_ami_copy_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml
new file mode 100644
index 000000000..8d4e5ac43
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_ami.yml
@@ -0,0 +1,48 @@
+---
+- when: openshift_aws_create_vpc | bool
+ name: create a vpc
+ include: vpc.yml
+
+- when: openshift_aws_users | length > 0
+ name: create aws ssh keypair
+ include: ssh_keys.yml
+
+- when: openshift_aws_create_security_groups | bool
+ name: Create compute security_groups
+ include: security_group.yml
+
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: fetch the default subnet id
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
+
+- name: create instance for ami creation
+ ec2:
+ assign_public_ip: yes
+ region: "{{ openshift_aws_region }}"
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ group: "{{ openshift_aws_clusterid }}"
+ instance_type: m4.xlarge
+ vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+ image: "{{ openshift_aws_base_ami }}"
+ volumes:
+ - device_name: /dev/sdb
+ volume_type: gp2
+ volume_size: 100
+ delete_on_termination: true
+ wait: yes
+ exact_count: 1
+ count_tag:
+ Name: "{{ openshift_aws_base_ami_name }}"
+ instance_tags:
+ Name: "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
new file mode 100644
index 000000000..0dac1c23d
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -0,0 +1,34 @@
+---
+# When openshift_aws_ami is '' we retrieve the most recently
+# built AMI matching openshift_aws_ami_name and openshift_aws_ami_tags,
+# then set openshift_aws_ami to that AMI's id.
+- when: openshift_aws_ami == ''
+ block:
+ - name: fetch recently created AMI
+ ec2_ami_find:
+ region: "{{ openshift_aws_region }}"
+ sort: creationDate
+ sort_order: descending
+ name: "{{ openshift_aws_ami_name }}*"
+ ami_tags: "{{ openshift_aws_ami_tags }}"
+ no_result_action: fail
+ register: amiout
+
+ - name: Set the openshift_aws_ami
+ set_fact:
+ openshift_aws_ami: "{{ amiout.results[0].ami_id }}"
+ when:
+ - "'results' in amiout"
+ - amiout.results|length > 0
+
+- when: openshift_aws_create_security_groups
+ name: "Create {{ openshift_aws_node_group_type }} security groups"
+ include: security_group.yml
+
+- when: openshift_aws_create_launch_config
+ name: "Create {{ openshift_aws_node_group_type }} launch config"
+ include: launch_config.yml
+
+- when: openshift_aws_create_scale_group
+ name: "Create {{ openshift_aws_node_group_type }} node group"
+ include: scale_group.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
new file mode 100644
index 000000000..a1fdd66fc
--- /dev/null
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -0,0 +1,68 @@
+---
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: debug
+ debug: var=vpcout
+
+- name: fetch the remote instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters: "{{ openshift_aws_elb_instance_filter }}"
+ register: instancesout
+
+- name: fetch the default subnet id
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
+
+- name:
+ debug:
+ msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+ if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+ else openshift_aws_elb_listeners }}"
+
+- name: "Create ELB {{ openshift_aws_elb_name }}"
+ ec2_elb_lb:
+ name: "{{ openshift_aws_elb_name }}"
+ state: present
+ security_group_names: "{{ openshift_aws_elb_security_groups }}"
+ idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+ region: "{{ openshift_aws_region }}"
+ subnets:
+ - "{{ subnetout.subnets[0].id }}"
+ health_check: "{{ openshift_aws_elb_health_check }}"
+ listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+ if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+ else openshift_aws_elb_listeners }}"
+ scheme: "{{ openshift_aws_elb_scheme }}"
+ tags:
+ KubernetesCluster: "{{ openshift_aws_clusterid }}"
+ register: new_elb
+
+# It is necessary to ignore_errors here because the instances are not in 'ready'
+# state when first added to ELB
+- name: "Add instances to ELB {{ openshift_aws_elb_name }}"
+ ec2_elb:
+ instance_id: "{{ item.id }}"
+ ec2_elbs: "{{ openshift_aws_elb_name }}"
+ state: present
+ region: "{{ openshift_aws_region }}"
+ wait: False
+ with_items: "{{ instancesout.instances }}"
+ ignore_errors: True
+ retries: 10
+ register: elb_call
+ until: elb_call|succeeded
+
+- debug:
+ msg: "{{ item }}"
+ with_items:
+ - "{{ new_elb }}"
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
new file mode 100644
index 000000000..cd9772a25
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -0,0 +1,29 @@
+---
+- name: upload certificates to AWS IAM
+ iam_cert23:
+ state: present
+ name: "{{ openshift_aws_iam_cert_name }}"
+ cert: "{{ openshift_aws_iam_cert_path }}"
+ key: "{{ openshift_aws_iam_cert_key_path }}"
+ cert_chain: "{{ openshift_aws_iam_cert_chain_path | default(omit) }}"
+ register: elb_cert_chain
+ failed_when:
+ - "'failed' in elb_cert_chain"
+ - elb_cert_chain.failed
+ - "'msg' in elb_cert_chain"
+ - "'already exists and has a different certificate body' in elb_cert_chain.msg"
+ - "'BotoServerError' in elb_cert_chain.msg"
+ when:
+ - openshift_aws_create_iam_cert | bool
+ - openshift_aws_iam_cert_path != ''
+ - openshift_aws_iam_cert_key_path != ''
+ - openshift_aws_elb_cert_arn == ''
+
+- name: set_fact openshift_aws_elb_cert_arn
+ set_fact:
+ openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+
+- name: wait for cert to propagate
+ pause:
+ seconds: 5
+ when: elb_cert_chain.changed
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
new file mode 100644
index 000000000..65c5a6cc0
--- /dev/null
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -0,0 +1,45 @@
+---
+- fail:
+ msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
+ when:
+ - openshift_aws_ami is undefined
+
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name:
+ - "{{ openshift_aws_clusterid }}" # default sg
+ - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg
+ - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s
+ region: "{{ openshift_aws_region }}"
+ register: ec2sgs
+
+# Create the scale group config
+- name: Create the node scale group launch config
+ ec2_lc:
+ name: "{{ openshift_aws_launch_config_name }}"
+ region: "{{ openshift_aws_region }}"
+ image_id: "{{ openshift_aws_ami }}"
+ instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
+ security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
+ user_data: |-
+ #cloud-config
+ {% if openshift_aws_node_group_type != 'master' %}
+ write_files:
+ - path: /root/csr_kubeconfig
+ owner: root:root
+ permissions: '0640'
+ content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
+ - path: /root/openshift_settings
+ owner: root:root
+ permissions: '0640'
+ content:
+ openshift_type: "{{ openshift_aws_node_group_type }}"
+ runcmd:
+ - [ systemctl, enable, atomic-openshift-node]
+ - [ systemctl, start, atomic-openshift-node]
+ {% endif %}
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
+ assign_public_ip: True
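For non-master groups the user_data above writes the bootstrap kubeconfig and node settings, then enables and starts the node service. A hedged sketch of the rendered cloud-config for a hypothetical compute group, with the kubeconfig content elided:

```yaml
#cloud-config
# Illustrative rendering for openshift_aws_node_group_type == 'compute'.
write_files:
- path: /root/csr_kubeconfig
  owner: root:root
  permissions: '0640'
  content: '<bootstrap kubeconfig contents>'
- path: /root/openshift_settings
  owner: root:root
  permissions: '0640'
  content:
    openshift_type: "compute"
runcmd:
- [systemctl, enable, atomic-openshift-node]
- [systemctl, start, atomic-openshift-node]
```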
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
new file mode 100644
index 000000000..189caeaee
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -0,0 +1,54 @@
+---
+- when: openshift_aws_create_vpc | bool
+ name: create default vpc
+ include: vpc.yml
+
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include: iam_cert.yml
+
+- when: openshift_aws_users | length > 0
+ name: create aws ssh keypair
+ include: ssh_keys.yml
+
+- when: openshift_aws_create_s3 | bool
+ name: create s3 bucket for registry
+ include: s3.yml
+
+- name: include scale group creation for master
+ include: build_node_group.yml
+
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": "{{ openshift_aws_node_group_type }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: create our master internal load balancers
+ include: elb.yml
+ vars:
+ openshift_aws_elb_direction: internal
+ openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal"
+ openshift_aws_elb_scheme: internal
+
+- name: create our master external load balancers
+ include: elb.yml
+ vars:
+ openshift_aws_elb_direction: external
+ openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external"
+ openshift_aws_elb_scheme: internet-facing
+
+- name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
+ when: openshift_aws_wait_for_ssh | bool
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
new file mode 100644
index 000000000..fc4996c68
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -0,0 +1,66 @@
+---
+# Get bootstrap config token
+# bootstrap should be created on first master
+# need to fetch it and shove it into cloud data
+- name: fetch master instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: slurp down the bootstrap.kubeconfig
+ slurp:
+ src: /etc/origin/master/bootstrap.kubeconfig
+ delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
+ remote_user: root
+ register: bootstrap
+
+- name: set_fact for kubeconfig token
+ set_fact:
+ openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
+
+- name: include build node group for infra
+ include: build_node_group.yml
+ vars:
+ openshift_aws_node_group_type: infra
+ openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra"
+ openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}"
+
+- name: include build node group for compute
+ include: build_node_group.yml
+ vars:
+ openshift_aws_node_group_type: compute
+ openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute"
+ openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}"
+
+- when: openshift_aws_wait_for_ssh | bool
+ block:
+ - name: pause and allow for instances to scale before we query them
+ pause:
+ seconds: 10
+
+ - name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": node
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+ - name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
new file mode 100644
index 000000000..9cf37c840
--- /dev/null
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -0,0 +1,7 @@
+---
+- name: Create an s3 bucket
+ s3:
+ bucket: "{{ openshift_aws_s3_bucket_name }}"
+ mode: "{{ openshift_aws_s3_mode }}"
+ region: "{{ openshift_aws_region }}"
+ when: openshift_aws_create_s3 | bool
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
new file mode 100644
index 000000000..3e969fc43
--- /dev/null
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -0,0 +1,32 @@
+---
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: fetch the subnet to use in scale group
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
+
+- name: Create the scale group
+ ec2_asg:
+ name: "{{ openshift_aws_scale_group_name }}"
+ launch_config_name: "{{ openshift_aws_launch_config_name }}"
+ health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}"
+ health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}"
+ min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}"
+ max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}"
+ desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}"
+ region: "{{ openshift_aws_region }}"
+ termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+ load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+ wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"
+ vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+ tags:
+ - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
new file mode 100644
index 000000000..0cb749dcc
--- /dev/null
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -0,0 +1,49 @@
+---
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_base_ami_name }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: bundle ami
+ ec2_ami:
+ instance_id: "{{ instancesout.instances.0.id }}"
+ region: "{{ openshift_aws_region }}"
+ state: present
+ description: "This was provisioned {{ ansible_date_time.iso8601 }}"
+ name: "{{ openshift_aws_ami_name }}"
+ tags: "{{ openshift_aws_ami_tags }}"
+ wait: yes
+ register: amioutput
+
+- debug: var=amioutput
+
+- when: openshift_aws_ami_encrypt | bool
+ block:
+ - name: augment the encrypted ami tags with source-ami
+ set_fact:
+ source_tag:
+ source-ami: "{{ amioutput.image_id }}"
+
+ - name: copy the ami for encrypted disks
+ include: ami_copy.yml
+ vars:
+ openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
+ openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
+ # TODO: How does the kms alias get passed to ec2_ami_copy
+ openshift_aws_ami_copy_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+ openshift_aws_ami_copy_tags: "{{ source_tag | combine(openshift_aws_ami_tags) }}"
+ # this option currently fails due to boto waiters
+      # when supported this needs to be re-applied
+ #openshift_aws_ami_copy_wait: True
+
+- name: terminate temporary instance
+ ec2:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ instance_ids: "{{ instancesout.instances.0.id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
new file mode 100644
index 000000000..161e72fb4
--- /dev/null
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -0,0 +1,45 @@
+---
+- name: Fetch the VPC for the vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_clusterid }}"
+ register: vpcout
+
+- name: Create default security group for cluster
+ ec2_group:
+ name: "{{ openshift_aws_node_security_groups.default.name }}"
+ description: "{{ openshift_aws_node_security_groups.default.desc }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True)}}"
+ register: sg_default_created
+
+- name: create the node group sgs
+ ec2_group:
+ name: "{{ item.name}}"
+ description: "{{ item.desc }}"
+ rules: "{{ item.rules if 'rules' in item else [] }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: sg_create
+ with_items:
+ - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: create the k8s sgs for the node group
+ ec2_group:
+ name: "{{ item.name }}_k8s"
+ description: "{{ item.desc }} for k8s"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: k8s_sg_create
+ with_items:
+ - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: tag sg groups with proper tags
+ ec2_tag:
+ tags:
+ KubernetesCluster: "{{ openshift_aws_clusterid }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws/tasks/ssh_keys.yml
index 232cf20ed..f439ce74e 100644
--- a/roles/openshift_aws_ssh_keys/tasks/main.yml
+++ b/roles/openshift_aws/tasks/ssh_keys.yml
@@ -3,6 +3,6 @@
ec2_key:
name: "{{ item.key_name }}"
key_material: "{{ item.pub_key }}"
- region: "{{ r_openshift_aws_ssh_keys_region }}"
- with_items: "{{ r_openshift_aws_ssh_keys_users }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ openshift_aws_users }}"
no_log: True
diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws/tasks/vpc.yml
index cfe08dae5..ce2c8eac5 100644
--- a/roles/openshift_aws_vpc/tasks/main.yml
+++ b/roles/openshift_aws/tasks/vpc.yml
@@ -2,13 +2,12 @@
- name: Create AWS VPC
ec2_vpc_net:
state: present
- cidr_block: "{{ r_openshift_aws_vpc_cidr }}"
+ cidr_block: "{{ openshift_aws_vpc.cidr }}"
dns_support: True
dns_hostnames: True
- region: "{{ r_openshift_aws_vpc_region }}"
- name: "{{ r_openshift_aws_vpc_clusterid }}"
- tags:
- Name: "{{ r_openshift_aws_vpc_clusterid }}"
+ region: "{{ openshift_aws_region }}"
+ name: "{{ openshift_aws_clusterid }}"
+ tags: "{{ openshift_aws_vpc_tags }}"
register: vpc
- name: Sleep to avoid a race condition when creating the vpc
@@ -18,23 +17,23 @@
- name: assign the vpc igw
ec2_vpc_igw:
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
vpc_id: "{{ vpc.vpc.id }}"
register: igw
- name: assign the vpc subnets
ec2_vpc_subnet:
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
vpc_id: "{{ vpc.vpc.id }}"
cidr: "{{ item.cidr }}"
az: "{{ item.az }}"
resource_tags:
Name: "{{ item.az }}"
- with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}"
+ with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
- name: Grab the route tables from our VPC
ec2_vpc_route_table_facts:
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
filters:
vpc-id: "{{ vpc.vpc.id }}"
register: route_table
@@ -44,9 +43,9 @@
lookup: id
route_table_id: "{{ route_table.route_tables[0].id }}"
vpc_id: "{{ vpc.vpc.id }}"
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
tags:
- Name: "{{ r_openshift_aws_vpc_name }}"
+ Name: "{{ openshift_aws_vpc_name }}"
routes:
- dest: 0.0.0.0/0
gateway_id: igw
diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md
deleted file mode 100644
index 111818451..000000000
--- a/roles/openshift_aws_ami_copy/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-openshift_aws_ami_perms
-=========
-
-Ansible role for copying an AMI
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- openshift_aws_ami_copy_src_ami: source AMI id to copy from
-- openshift_aws_ami_copy_region: region where the AMI is found
-- openshift_aws_ami_copy_name: name to assign to new AMI
-- openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption
-- openshift_aws_ami_copy_tags: dict with desired tags
-- openshift_aws_ami_copy_wait: wait for the ami copy to achieve available status. This fails due to boto waiters.
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
- - name: copy the ami for encrypted disks
- include_role:
- name: openshift_aws_ami_copy
- vars:
- r_openshift_aws_ami_copy_region: us-east-1
- r_openshift_aws_ami_copy_name: myami
- r_openshift_aws_ami_copy_src_ami: ami-1234
- r_openshift_aws_ami_copy_kms_arn: arn:xxxx
- r_openshift_aws_ami_copy_tags: {}
- r_openshift_aws_ami_copy_encrypt: False
-
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml
deleted file mode 100644
index bcccd4042..000000000
--- a/roles/openshift_aws_ami_copy/tasks/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- fail:
- msg: "{{ item }} needs to be defined"
- when: item is not defined
- with_items:
- - r_openshift_aws_ami_copy_src_ami
- - r_openshift_aws_ami_copy_name
- - r_openshift_aws_ami_copy_region
-
-- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}"
- ec2_ami_copy:
- region: "{{ r_openshift_aws_ami_copy_region }}"
- source_region: "{{ r_openshift_aws_ami_copy_region }}"
- name: "{{ r_openshift_aws_ami_copy_name }}"
- source_image_id: "{{ r_openshift_aws_ami_copy_src_ami }}"
- encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}"
- kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}"
- wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}"
- tags: "{{ r_openshift_aws_ami_copy_tags }}"
- register: copy_result
-
-- debug: var=copy_result
-
-- name: return AMI ID with setfact - openshift_aws_ami_copy_retval_custom_ami
- set_fact:
- r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md
deleted file mode 100644
index ecc45fa14..000000000
--- a/roles/openshift_aws_elb/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-openshift_aws_elb
-=========
-
-Ansible role to provision and manage AWS ELB's for Openshift.
-
-Requirements
-------------
-
-Ansible Modules:
-
-- ec2_elb
-- ec2_elb_lb
-
-python package:
-
-python-boto
-
-Role Variables
---------------
-
-- r_openshift_aws_elb_instances: instances to put in ELB
-- r_openshift_aws_elb_elb_name: name of elb
-- r_openshift_aws_elb_security_group_names: list of SGs (by name) that the ELB will belong to
-- r_openshift_aws_elb_region: AWS Region
-- r_openshift_aws_elb_health_check: definition of the ELB health check. See ansible docs for ec2_elb
-```yaml
- ping_protocol: tcp
- ping_port: 443
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
-```
-- r_openshift_aws_elb_listeners: definition of the ELB listeners. See ansible docs for ec2_elb
-```yaml
-- protocol: tcp
- load_balancer_port: 80
- instance_protocol: ssl
- instance_port: 443
-- protocol: ssl
- load_balancer_port: 443
- instance_protocol: ssl
- instance_port: 443
- # ssl certificate required for https or ssl
- ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- include_role:
- name: openshift_aws_elb
- vars:
- r_openshift_aws_elb_instances: aws_instances_to_put_in_elb
- r_openshift_aws_elb_elb_name: elb_name
- r_openshift_aws_elb_security_groups: security_group_names
- r_openshift_aws_elb_region: aws_region
- r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}"
- r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml
deleted file mode 100644
index ed5d38079..000000000
--- a/roles/openshift_aws_elb/defaults/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-r_openshift_aws_elb_health_check:
- ping_protocol: tcp
- ping_port: 443
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
-
-r_openshift_aws_elb_cert_arn: ''
-
-r_openshift_aws_elb_listeners:
- master:
- external:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: ssl
- instance_port: 443
- - protocol: ssl
- load_balancer_port: 443
- instance_protocol: ssl
- instance_port: 443
- # ssl certificate required for https or ssl
- ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
- internal:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: tcp
- instance_port: 80
- - protocol: tcp
- load_balancer_port: 443
- instance_protocol: tcp
- instance_port: 443
diff --git a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml
deleted file mode 100644
index 58be652a5..000000000
--- a/roles/openshift_aws_elb/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Openshift ELB provisioning
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies: []
diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml
deleted file mode 100644
index 64ec18545..000000000
--- a/roles/openshift_aws_elb/tasks/main.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: fetch the default subnet id
- ec2_remote_facts:
- region: "{{ r_openshift_aws_elb_region }}"
- filters: "{{ r_openshift_aws_elb_instance_filter }}"
- register: instancesout
-
-- name: fetch the default subnet id
- ec2_vpc_subnet_facts:
- region: "{{ r_openshift_aws_elb_region }}"
- filters:
- "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}"
- register: subnetout
-
-- name:
- debug:
- msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
- if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type
- else r_openshift_aws_elb_listeners }}"
-
-- name: "Create ELB {{ r_openshift_aws_elb_name }}"
- ec2_elb_lb:
- name: "{{ r_openshift_aws_elb_name }}"
- state: present
- security_group_names: "{{ r_openshift_aws_elb_security_groups }}"
- idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}"
- region: "{{ r_openshift_aws_elb_region }}"
- subnets:
- - "{{ subnetout.subnets[0].id }}"
- health_check: "{{ r_openshift_aws_elb_health_check }}"
- listeners: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
- if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type
- else r_openshift_aws_elb_listeners }}"
- scheme: "{{ r_openshift_aws_elb_scheme }}"
- tags:
- KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}"
- register: new_elb
-
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-# state when first added to ELB
-- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}"
- ec2_elb:
- instance_id: "{{ item.id }}"
- ec2_elbs: "{{ r_openshift_aws_elb_name }}"
- state: present
- region: "{{ r_openshift_aws_elb_region }}"
- wait: False
- with_items: "{{ instancesout.instances }}"
- ignore_errors: True
- retries: 10
- register: elb_call
- until: elb_call|succeeded
-
-- debug:
- msg: "{{ item }}"
- with_items:
- - "{{ new_elb }}"
diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md
deleted file mode 100644
index 9468e785c..000000000
--- a/roles/openshift_aws_iam_kms/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_iam_kms
-=========
-
-Ansible role to create AWS IAM KMS keys for encryption
-
-Requirements
-------------
-
-Ansible Modules:
-
-oo_iam_kms
-
-Role Variables
---------------
-
-- r_openshift_aws_iam_kms_region: AWS region to create KMS key
-- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key
-
-Dependencies
-------------
-
-lib_utils
-
-Example Playbook
-----------------
-```yaml
-- include_role:
- name: openshift_aws_iam_kms
- vars:
- r_openshift_aws_iam_kms_region: 'us-east-1'
- r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms'
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_iam_kms/defaults/main.yml b/roles/openshift_aws_iam_kms/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_iam_kms/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_aws_iam_kms/meta/main.yml
deleted file mode 100644
index e29aaf96b..000000000
--- a/roles/openshift_aws_iam_kms/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: AWS IAM KMS setup and management
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- lib_utils
diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml
deleted file mode 100644
index 32aac2666..000000000
--- a/roles/openshift_aws_iam_kms/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- fail:
- msg: "{{ item.name }} needs to be defined."
- when: item.cond | bool
- with_items:
- - name: "{{ r_openshift_aws_iam_kms_alias }}"
- cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}"
- - name: "{{ r_openshift_aws_iam_kms_region }}"
- cond: "{{ r_openshift_aws_iam_kms_region is undefined }}"
-
-- name: Create IAM KMS key with alias
- oo_iam_kms:
- state: present
- alias: "{{ r_openshift_aws_iam_kms_alias }}"
- region: "{{ r_openshift_aws_iam_kms_region }}"
- register: created_kms
-
-- debug: var=created_kms.results
diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md
deleted file mode 100644
index 52b7e83b6..000000000
--- a/roles/openshift_aws_launch_config/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-openshift_aws_launch_config
-=========
-
-Ansible role to create an AWS launch config for a scale group.
-
-This includes the AMI, volumes, user_data, etc.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
-- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
-- r_openshift_aws_launch_config_region: "{{ region }}"
-- r_openshift_aws_launch_config: "{{ node_group_config }}"
-```yaml
- master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-```
-- r_openshift_aws_launch_config_type: compute
-- r_openshift_aws_launch_config_custom_image: ami-xxxxx
-- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig>
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
- - name: create compute nodes config
- include_role:
- name: openshift_aws_launch_config
- vars:
- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
- r_openshift_aws_launch_config_region: "{{ region }}"
- r_openshift_aws_launch_config: "{{ node_group_config }}"
- r_openshift_aws_launch_config_type: compute
- r_openshift_aws_launch_config_custom_image: ami-1234
- r_openshift_aws_launch_config_bootstrap_token: abcd
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_launch_config/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml
deleted file mode 100644
index e61670cc2..000000000
--- a/roles/openshift_aws_launch_config/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Openshift AWS VPC creation
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 2.3
- platforms:
- - name: EL
- versions:
- - 7
-dependencies: []
diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml
deleted file mode 100644
index 437cf1f71..000000000
--- a/roles/openshift_aws_launch_config/tasks/main.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: fail when params are not set
- fail:
- msg: Please specify the role parameters.
- when:
- - r_openshift_aws_launch_config_cluseterid is undefined
- - r_openshift_aws_launch_config_type is undefined
- - r_openshift_aws_launch_config_region is undefined
- - r_openshift_aws_launch_config is undefined
-
-- name: fetch the security groups for launch config
- ec2_group_facts:
- filters:
- group-name:
- - "{{ r_openshift_aws_launch_config_clusterid }}" # default sg
- - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}" # node type sg
- - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s" # node type sg k8s
- region: "{{ r_openshift_aws_launch_config_region }}"
- register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group config
- ec2_lc:
- name: "{{ r_openshift_aws_launch_config_name }}"
- region: "{{ r_openshift_aws_launch_config_region }}"
- image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}"
- instance_type: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}"
- security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
- user_data: |-
- #cloud-config
- {% if r_openshift_aws_launch_config_type != 'master' %}
- write_files:
- - path: /root/csr_kubeconfig
- owner: root:root
- permissions: '0640'
- content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
- - path: /root/openshift_settings
- owner: root:root
- permissions: '0640'
- content:
- openshift_type: "{{ r_openshift_aws_launch_config_type }}"
- runcmd:
- - [ systemctl, enable, atomic-openshift-node]
- - [ systemctl, start, atomic-openshift-node]
- {% endif %}
- key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}"
- ebs_optimized: False
- volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}"
- assign_public_ip: True
- register: test
diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2
deleted file mode 100644
index 1a1e29550..000000000
--- a/roles/openshift_aws_launch_config/templates/cloud-init.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token is not '' %}
-#cloud-config
-write_files:
-- path: /root/csr_kubeconfig
- owner: root:root
- permissions: '0640'
- content: |-
- {{ r_openshift_aws_launch_config_bootstrap_token }}
-{% endif %}
diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md
deleted file mode 100644
index c32c57bc5..000000000
--- a/roles/openshift_aws_node_group/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-openshift_aws_node_group
-=========
-
-Ansible role to create an aws node group.
-
-This includes the security group, launch config, and scale group.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-```yaml
-- r_openshift_aws_node_group_name: myscalegroup
-- r_openshift_aws_node_group_clusterid: myclusterid
-- r_openshift_aws_node_group_region: us-east-1
-- r_openshift_aws_node_group_lc_name: launch_config
-- r_openshift_aws_node_group_type: master|infra|compute
-- r_openshift_aws_node_group_config: "{{ node_group_config }}"
-```yaml
-master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-```
-- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-
-```yaml
-us-east-1a # name of subnet
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
- - name: "create {{ openshift_build_node_type }} node groups"
- include_role:
- name: openshift_aws_node_group
- vars:
- r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute"
- r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
- r_openshift_aws_node_group_clusterid: "{{ clusterid }}"
- r_openshift_aws_node_group_region: "{{ region }}"
- r_openshift_aws_node_group_config: "{{ node_group_config }}"
- r_openshift_aws_node_group_type: compute
- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml
deleted file mode 100644
index 44c5116a1..000000000
--- a/roles/openshift_aws_node_group/defaults/main.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-r_openshift_aws_node_group_type: master
-
-r_openshift_aws_node_group_config:
- tags:
- clusterid: "{{ r_openshift_aws_node_group_clusterid }}"
- master:
- instance_type: m4.xlarge
- ami: "{{ r_openshift_aws_node_group_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
- compute:
- instance_type: m4.xlarge
- ami: "{{ r_openshift_aws_node_group_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 100
- desired_size: 3
- tags:
- host-type: node
- sub-host-type: compute
- infra:
- instance_type: m4.xlarge
- ami: "{{ r_openshift_aws_node_group_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 2
- max_size: 20
- desired_size: 2
- tags:
- host-type: node
- sub-host-type: infra
diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml
deleted file mode 100644
index 6f5364b03..000000000
--- a/roles/openshift_aws_node_group/tasks/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: validate role inputs
- fail:
- msg: Please pass in the required role variables
- when:
- - r_openshift_aws_node_group_clusterid is not defined
- - r_openshift_aws_node_group_region is not defined
- - r_openshift_aws_node_group_subnet_name is not defined
-
-- name: fetch the subnet to use in scale group
- ec2_vpc_subnet_facts:
- region: "{{ r_openshift_aws_node_group_region }}"
- filters:
- "tag:Name": "{{ r_openshift_aws_node_group_subnet_name }}"
- register: subnetout
-
-- name: Create the scale group
- ec2_asg:
- name: "{{ r_openshift_aws_node_group_name }}"
- launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}"
- health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}"
- health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}"
- min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}"
- max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}"
- desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}"
- region: "{{ r_openshift_aws_node_group_region }}"
- termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
- load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
- wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}"
- vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
- tags:
- - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md
deleted file mode 100644
index afafe61cf..000000000
--- a/roles/openshift_aws_s3/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_s3
-=========
-
-Ansible role to create an s3 bucket
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_s3_clusterid: myclusterid
-- r_openshift_aws_s3_region: us-east-1
-- r_openshift_aws_s3_mode: create|delete
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- name: create an s3 bucket
- include_role:
- name: openshift_aws_s3
- vars:
- r_openshift_aws_s3_clusterid: mycluster
- r_openshift_aws_s3_region: us-east-1
- r_openshift_aws_s3_mode: create
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml
deleted file mode 100644
index 46bd781bd..000000000
--- a/roles/openshift_aws_s3/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Create an s3 bucket
- s3:
- bucket: "{{ r_openshift_aws_s3_clusterid }}"
- mode: "{{ r_openshift_aws_s3_mode }}"
- region: "{{ r_openshift_aws_s3_region }}"
diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md
deleted file mode 100644
index eeb76bbb6..000000000
--- a/roles/openshift_aws_sg/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-openshift_aws_sg
-=========
-
-Ansible role to create an aws security groups
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_sg_clusterid: myclusterid
-- r_openshift_aws_sg_region: us-east-1
-- r_openshift_aws_sg_type: master|infra|compute
-```yaml
-# defaults/main.yml
- default:
- name: "{{ r_openshift_aws_sg_clusterid }}"
- desc: "{{ r_openshift_aws_sg_clusterid }} default"
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- - proto: all
- from_port: all
- to_port: all
- group_name: "{{ r_openshift_aws_sg_clusterid }}"
-```
-
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- name: create security groups for master
- include_role:
- name: openshift_aws_sg
- vars:
- r_openshift_aws_sg_clusterid: mycluster
- r_openshift_aws_sg_region: us-east-1
- r_openshift_aws_sg_type: master
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml
deleted file mode 100644
index 9c480d337..000000000
--- a/roles/openshift_aws_sg/defaults/main.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-r_openshift_aws_sg_sg:
- default:
- name: "{{ r_openshift_aws_sg_clusterid }}"
- desc: "{{ r_openshift_aws_sg_clusterid }} default"
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- - proto: all
- from_port: all
- to_port: all
- group_name: "{{ r_openshift_aws_sg_clusterid }}"
- master:
- name: "{{ r_openshift_aws_sg_clusterid }}_master"
- desc: "{{ r_openshift_aws_sg_clusterid }} master instances"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 443
- to_port: 443
- cidr_ip: 0.0.0.0/0
- compute:
- name: "{{ r_openshift_aws_sg_clusterid }}_compute"
- desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances"
- infra:
- name: "{{ r_openshift_aws_sg_clusterid }}_infra"
- desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 443
- to_port: 443
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 30000
- to_port: 32000
- cidr_ip: 0.0.0.0/0
- etcd:
- name: "{{ r_openshift_aws_sg_clusterid }}_etcd"
- desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances"
diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml
deleted file mode 100644
index 2294fdcc9..000000000
--- a/roles/openshift_aws_sg/tasks/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Validate role inputs
- fail:
- msg: Please ensure to pass the correct variables
- when:
- - r_openshift_aws_sg_region is undefined
- - r_openshift_aws_sg_region is undefined
-
-
-- name: Fetch the VPC for vpc.id
- ec2_vpc_net_facts:
- region: "{{ r_openshift_aws_sg_region }}"
- filters:
- "tag:Name": "{{ r_openshift_aws_sg_clusterid }}"
- register: vpcout
-
-- name: Create default security group for cluster
- ec2_group:
- name: "{{ r_openshift_aws_sg_sg.default.name }}"
- description: "{{ r_openshift_aws_sg_sg.default.desc }}"
- region: "{{ r_openshift_aws_sg_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- rules: "{{ r_openshift_aws_sg_sg.default.rules | default(omit, True)}}"
- register: sg_default_created
-
-- name: create the node group sgs
- ec2_group:
- name: "{{ item.name}}"
- description: "{{ item.desc }}"
- rules: "{{ item.rules if 'rules' in item else [] }}"
- region: "{{ r_openshift_aws_sg_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- register: sg_create
- with_items:
- - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}"
-
-- name: create the k8s sgs for the node group
- ec2_group:
- name: "{{ item.name }}_k8s"
- description: "{{ item.desc }} for k8s"
- region: "{{ r_openshift_aws_sg_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- register: k8s_sg_create
- with_items:
- - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type] }}"
-
-- name: tag sg groups with proper tags
- ec2_tag:
- tags:
- KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}"
- resource: "{{ item.group_id }}"
- region: "{{ r_openshift_aws_sg_region }}"
- with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md
deleted file mode 100644
index 4f8667918..000000000
--- a/roles/openshift_aws_ssh_keys/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-openshift_aws_ssh_keys
-=========
-
-Ansible role for sshind SSH keys
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_ssh_keys_users: list of dicts of users
-- r_openshift_aws_ssh_keys_region: ec2_region to install the keys
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-users:
-- username: user1
- pub_key: <user1 ssh public key>
-- username: user2
- pub_key: <user2 ssh public key>
-
-region: us-east-1
-
-- include_role:
- name: openshift_aws_ssh_keys
- vars:
- r_openshift_aws_ssh_keys_users: "{{ users }}"
- r_openshift_aws_ssh_keys_region: "{{ region }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md
deleted file mode 100644
index d88cf0581..000000000
--- a/roles/openshift_aws_vpc/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-openshift_aws_vpc
-=========
-
-Ansible role to create a default AWS VPC
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_vpc_clusterid: "{{ clusterid }}"
-- r_openshift_aws_vpc_cidr: 172.31.48.0/20
-- r_openshift_aws_vpc_subnets: "{{ subnets }}"
-```yaml
- subnets:
- us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
- - cidr: 172.31.48.0/20
- az: "us-east-1c"
- - cidr: 172.31.32.0/20
- az: "us-east-1e"
- - cidr: 172.31.16.0/20
- az: "us-east-1a"
-```
-- r_openshift_aws_vpc_region: "{{ region }}"
-- r_openshift_aws_vpc_tags: dict of tags to apply to vpc
-- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}"
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-
-```yaml
- - name: create default vpc
- include_role:
- name: openshift_aws_vpc
- vars:
- r_openshift_aws_vpc_clusterid: mycluster
- r_openshift_aws_vpc_cidr: 172.31.48.0/20
- r_openshift_aws_vpc_subnets: "{{ subnets }}"
- r_openshift_aws_vpc_region: us-east-1
- r_openshift_aws_vpc_tags: {}
- r_openshift_aws_vpc_name: mycluster
-
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_vpc/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
index 8aa57e75a..b82c2e602 100644
--- a/roles/openshift_cfme/defaults/main.yml
+++ b/roles/openshift_cfme/defaults/main.yml
@@ -27,9 +27,6 @@ openshift_cfme_pv_data:
# Tuning parameter to use more than 5 images at once from an ImageStream
openshift_cfme_maxImagesBulkImportedPerRepository: 100
-# Hostname/IP of the NFS server. Currently defaults to first master
-openshift_cfme_nfs_server: "{{ groups.nfs.0 }}"
-openshift_cfme_nfs_directory: "/exports"
# TODO: Refactor '_install_app' variable. This is just for testing but
# maybe in the future it should control the entire yes/no for CFME.
#
diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_cfme/meta/main.yml
index 9200f2c3c..162d817f0 100644
--- a/roles/openshift_cfme/meta/main.yml
+++ b/roles/openshift_cfme/meta/main.yml
@@ -16,5 +16,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
-- role: openshift_common
- role: openshift_master_facts
diff --git a/roles/openshift_cfme/tasks/nfs.yml b/roles/openshift_cfme/tasks/nfs.yml
index 8db45492e..ca04628a8 100644
--- a/roles/openshift_cfme/tasks/nfs.yml
+++ b/roles/openshift_cfme/tasks/nfs.yml
@@ -1,6 +1,13 @@
---
# Tasks to statically provision NFS volumes
# Include if not using dynamic volume provisioning
+
+- name: Set openshift_cfme_nfs_server fact
+ when: openshift_cfme_nfs_server is not defined
+ set_fact:
+ # Hostname/IP of the NFS server. Currently defaults to the first host in the NFS group
+ openshift_cfme_nfs_server: "{{ oo_nfs_to_config.0 }}"
+
- name: Ensure the /exports/ directory exists
file:
path: /exports/
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index 04a1ce873..29ed82783 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -14,5 +14,4 @@ galaxy_info:
dependencies:
- role: openshift_docker
when: not skip_docker_role | default(False) | bool
-- role: openshift_common
- role: openshift_facts
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
deleted file mode 100644
index 2a271854b..000000000
--- a/roles/openshift_common/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-OpenShift/Atomic Enterprise Common
-===================================
-
-OpenShift/Atomic Enterprise common installation and configuration tasks.
-
-Requirements
-------------
-
-A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
-
-Role Variables
---------------
-
-| Name | Default value | |
-|---------------------------|-------------------|---------------------------------------------|
-| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters |
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) |
-| openshift_ip | UNDEF | Internal IP address to use for this host |
-| openshift_public_hostname | UNDEF | Public hostname to use for this host |
-| openshift_public_ip | UNDEF | Public IP address to use for this host |
-| openshift_portal_net | UNDEF | Service IP CIDR |
-
-Dependencies
-------------
-
-os_firewall
-openshift_facts
-openshift_repos
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml
deleted file mode 100644
index 267c03605..000000000
--- a/roles/openshift_common/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_cluster_id: 'default'
-openshift_debug_level: 2
diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml
deleted file mode 100644
index 7cc95d8fa..000000000
--- a/roles/openshift_common/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: OpenShift Common
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.7
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
deleted file mode 100644
index a0bd6c860..000000000
--- a/roles/openshift_common/tasks/main.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- fail:
- msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
-
-- fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
-
-- fail:
- msg: Nuage sdn can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
-
-- fail:
- msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: Contiv can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: Contiv can not be used with nuage
- when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
-
-- fail:
- msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
-
-- fail:
- msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
-
-- fail:
- msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: openshift_hostname must be 63 characters or less
- when: openshift_hostname is defined and openshift_hostname | length > 63
-
-- name: Set common Cluster facts
- openshift_facts:
- role: common
- local_facts:
- install_examples: "{{ openshift_install_examples | default(True) }}"
- use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
- sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
- use_flannel: "{{ openshift_use_flannel | default(None) }}"
- use_calico: "{{openshift_use_calico | default(None) }}"
- use_nuage: "{{ openshift_use_nuage | default(None) }}"
- use_contiv: "{{ openshift_use_contiv | default(None) }}"
- use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
- data_dir: "{{ openshift_data_dir | default(None) }}"
- use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}"
-
-- name: Install the base package for versioning
- package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when: not openshift.common.is_containerized | bool
-
-- name: Set version facts
- openshift_facts:
-
-# For enterprise versions < 3.1 and origin versions < 1.1 we want to set the
-# hostname by default.
-- set_fact:
- set_hostname_default: "{{ not openshift.common.version_gte_3_1_or_1_1 }}"
-
-- name: Set hostname
- command: >
- hostnamectl set-hostname {{ openshift.common.hostname }}
- when: openshift_set_hostname | default(set_hostname_default) | bool
diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml
index 5cfda1c89..f3fe2dcbe 100644
--- a/roles/openshift_examples/meta/main.yml
+++ b/roles/openshift_examples/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- role: openshift_common
+dependencies: []
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index cf78b4a75..517e0231d 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -449,78 +449,6 @@ def normalize_provider_facts(provider, metadata):
return facts
-def set_flannel_facts_if_unset(facts):
- """ Set flannel facts if not already present in facts dict
- dict: the facts dict updated with the flannel facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the flannel
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_flannel' not in facts['common']:
- use_flannel = False
- facts['common']['use_flannel'] = use_flannel
- return facts
-
-
-def set_calico_facts_if_unset(facts):
- """ Set calico facts if not already present in facts dict
- dict: the facts dict updated with the calico facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the calico
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_calico' not in facts['common']:
- use_calico = False
- facts['common']['use_calico'] = use_calico
- return facts
-
-
-def set_nuage_facts_if_unset(facts):
- """ Set nuage facts if not already present in facts dict
- dict: the facts dict updated with the nuage facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the nuage
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_nuage' not in facts['common']:
- use_nuage = False
- facts['common']['use_nuage'] = use_nuage
- return facts
-
-
-def set_contiv_facts_if_unset(facts):
- """ Set contiv facts if not already present in facts dict
- dict: the facts dict updated with the contiv facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the contiv
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_contiv' not in facts['common']:
- use_contiv = False
- facts['common']['use_contiv'] = use_contiv
- return facts
-
-
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -590,13 +518,8 @@ def set_dnsmasq_facts_if_unset(facts):
"""
if 'common' in facts:
- if 'use_dnsmasq' not in facts['common']:
- facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
if 'master' in facts and 'dns_port' not in facts['master']:
- if safe_get_bool(facts['common']['use_dnsmasq']):
- facts['master']['dns_port'] = 8053
- else:
- facts['master']['dns_port'] = 53
+ facts['master']['dns_port'] = 8053
return facts
@@ -968,27 +891,6 @@ def set_version_facts_if_unset(facts):
return facts
-def set_manageiq_facts_if_unset(facts):
- """ Set manageiq facts. This currently includes common.use_manageiq.
-
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with version facts.
- Raises:
- OpenShiftFactsInternalError:
- """
- if 'common' not in facts:
- if 'version_gte_3_1_or_1_1' not in facts['common']:
- raise OpenShiftFactsInternalError(
- "Invalid invocation: The required facts are not set"
- )
- if 'use_manageiq' not in facts['common']:
- facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']
-
- return facts
-
-
def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
@@ -999,15 +901,6 @@ def set_sdn_facts_if_unset(facts, system_facts):
dict: the facts dict updated with the generated sdn facts if they
were not already present
"""
- # pylint: disable=too-many-branches
- if 'common' in facts:
- use_sdn = facts['common']['use_openshift_sdn']
- if not (use_sdn == '' or isinstance(use_sdn, bool)):
- use_sdn = safe_get_bool(use_sdn)
- facts['common']['use_openshift_sdn'] = use_sdn
- if 'sdn_network_plugin_name' not in facts['common']:
- plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
- facts['common']['sdn_network_plugin_name'] = plugin
if 'master' in facts:
# set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
@@ -1709,11 +1602,13 @@ def set_builddefaults_facts(facts):
builddefaults['git_no_proxy'] = builddefaults['no_proxy']
# If we're actually defining a builddefaults config then create admission_plugin_config
# then merge builddefaults[config] structure into admission_plugin_config
+
+ # 'config' is the 'openshift_builddefaults_json' inventory variable
if 'config' in builddefaults:
if 'admission_plugin_config' not in facts['master']:
- facts['master']['admission_plugin_config'] = dict()
+ # Scaffold out the full expected datastructure
+ facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- # if the user didn't actually provide proxy values, delete the proxy env variable defaults.
delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
@@ -1996,10 +1891,6 @@ class OpenShiftFacts(object):
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_project_cfg_facts_if_unset(facts)
- facts = set_flannel_facts_if_unset(facts)
- facts = set_calico_facts_if_unset(facts)
- facts = set_nuage_facts_if_unset(facts)
- facts = set_contiv_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
@@ -2011,7 +1902,6 @@ class OpenShiftFacts(object):
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
facts = set_dnsmasq_facts_if_unset(facts)
- facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
@@ -2039,7 +1929,7 @@ class OpenShiftFacts(object):
self.system_facts['ansible_fqdn']]
hostname = choose_hostname(hostname_values, ip_addr)
- defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
+ defaults['common'] = dict(ip=ip_addr,
public_ip=ip_addr,
deployment_type=deployment_type,
deployment_subtype=deployment_subtype,
@@ -2048,10 +1938,8 @@ class OpenShiftFacts(object):
portal_net='172.30.0.0/16',
client_binary='oc', admin_binary='oadm',
dns_domain='cluster.local',
- install_examples=True,
debug_level=2,
- config_base='/etc/origin',
- data_dir='/var/lib/origin')
+ config_base='/etc/origin')
if 'master' in roles:
defaults['master'] = dict(api_use_ssl=True, api_port='8443',
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yaml
new file mode 100644
index 000000000..ad205ba33
--- /dev/null
+++ b/roles/openshift_gcp/tasks/main.yaml
@@ -0,0 +1,43 @@
+#
+# This role relies on the gcloud CLI, invoked via templated bash scripts, to
+# provide a high-performance deployment option. The next logical step is to
+# transition to a Deployment Manager template which is then instantiated.
+# TODO: use a formal set of role parameters consistent with openshift_aws
+#
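+# The templated scripts below assume inventory variables such as gce_project_id,
+# gce_region_name, gce_zone_name, public_hosted_zone and provision_prefix are set;
+# 'state' (default 'present') selects provisioning vs. de-provisioning.
+#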
+---
+- name: Templatize DNS script
+ template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx
+- name: Templatize provision script
+ template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx
+- name: Templatize de-provision script
+ template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx
+ when:
+ - state | default('present') == 'absent'
+
+- name: Provision GCP DNS domain
+ command: /tmp/openshift_gcp_provision_dns.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ register: dns_provision
+ when:
+ - state | default('present') == 'present'
+
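+# NOTE: the 'dig' lookup below requires the dnspython library on the Ansible control host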
+- name: Ensure that DNS resolves to the hosted zone
+ assert:
+ that:
+ - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout"
+ msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'."
+ when:
+ - state | default('present') == 'present'
+
+- name: Provision GCP resources
+ command: /tmp/openshift_gcp_provision.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ when:
+ - state | default('present') == 'present'
+
+- name: De-provision GCP resources
+ command: /tmp/openshift_gcp_provision_remove.sh
+ when:
+ - state | default('present') == 'absent'
diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh
new file mode 100644
index 000000000..eacf84b4d
--- /dev/null
+++ b/roles/openshift_gcp/templates/dns.j2.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -euo pipefail
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
+fi
+
+# Always output the expected nameservers as a comma delimited list
+gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
new file mode 100644
index 000000000..e68e9683f
--- /dev/null
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -0,0 +1,318 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Create SSH key for GCE
+if [ ! -f "{{ gce_ssh_private_key }}" ]; then
+ ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N ''
+ ssh-add "{{ gce_ssh_private_key }}" || true
+fi
+
+# Check if the public half of gce_ssh_private_key is in the project metadata, and if not, add it there
+pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub")
+key_tmp_file='/tmp/ocp-gce-keys'
+if ! gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then
+ if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then
+ gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
+ fi
+ echo -n 'cloud-user:' >> "$key_tmp_file"
+ cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file"
+ gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
+ rm -f "$key_tmp_file"
+fi
+
+metadata=""
+if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then
+ echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)"
+ exit 1
+ fi
+ metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}"
+fi
+if [[ -n "{{ provision_gce_user_data_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then
+ echo "User data file missing at {{ provision_gce_user_data_file }}"
+ exit 1
+ fi
+ if [[ -n "${metadata}" ]]; then
+ metadata+=","
+ else
+ metadata="--metadata-from-file="
+ fi
+ metadata+="user-data={{ provision_gce_user_data_file }}"
+fi
+
+# Select image or image family
+image="{{ provision_gce_registered_image }}"
+if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then
+ if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then
+ echo "No compute image or image-family found; create an image named '{{ provision_gce_registered_image }}' to continue"
+ exit 1
+ fi
+ image="family/${image}"
+fi
+
+### PROVISION THE INFRASTRUCTURE ###
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# The DNS managed zone must already exist in Google Cloud DNS (created by the DNS provisioning step); abort if it does not
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
+ exit 1
+fi
+
+# Create network
+if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto"
+else
+ echo "Network '{{ gce_network_name }}' already exists"
+fi
+
+# Firewall rules in a form:
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+range=""
+if [[ -n "{{ openshift_node_port_range }}" ]]; then
+ range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
+fi
+declare -A FW_RULES=(
+ ['icmp']='--allow icmp'
+ ['ssh-external']='--allow tcp:22'
+ ['ssh-internal']='--allow tcp:22 --source-tags bastion'
+ ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
+ ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
+ ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
+ ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
+ ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]}
+ else
+ echo "Firewall rule '{{ provision_prefix }}${rule}' already exists"
+ fi ) &
+done
+
+
+# Master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global
+else
+ echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists"
+fi ) &
+
+# Internal master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists"
+fi ) &
+
+# Router IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists"
+fi ) &
+
+
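+# One instance template and one managed instance group are created per entry in provision_gce_node_groups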
+{% for node_group in provision_gce_node_groups %}
+# configure {{ node_group.name }}
+(
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \
+ --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \
+ --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \
+ --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
+ --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
+ --image "${image}" ${metadata}
+ else
+ echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists"
+ fi
+
+ # Create instance group
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \
+ --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
+ else
+ echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists"
+ fi
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+
+# Configure the master external LB rules
+(
+# Master health check
+if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists"
+fi
+
+gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \
+ --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}"
+
+# Master backend service
+if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}"
+ gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}"
+else
+ echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists"
+fi
+
+# Master tcp proxy target
+if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend"
+else
+ echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists"
+fi
+
+# Master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the master internal LB rules
+(
+# Internal master health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists"
+fi
+
+# Internal master target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists"
+fi
+
+# Internal master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the infra node rules
+(
+# Router health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists"
+fi
+
+# Router target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists"
+fi
+
+# Router forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists"
+fi
+) &
+
+for i in `jobs -p`; do wait $i; done
+
+# set the target pools
+(
+if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+else
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}"
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+fi
+) &
+
+# configure DNS
+(
+# Retry DNS changes until they succeed since this may be a shared resource
+while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f $dns
+
+ # DNS record for master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
+ fi
+
+ # DNS record for internal master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
+ fi
+
+ # DNS record for router lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
+ else
+ echo "DNS record for '{{ wildcard_zone }}' already exists"
+ fi
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if [[ -f $dns ]]; then
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ rc=$?
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+ continue
+ fi
+ exit $rc
+ fi
+ fi
+ break
+done
+) &
+
+# Create bucket for registry
+(
+if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+else
+ echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists"
+fi
+) &
+
+# wait until all node groups are stable
+{% for node_group in provision_gce_node_groups %}
+# wait for stable {{ node_group.name }}
+( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) &
+{% endfor %}
+
+
+for i in `jobs -p`; do wait $i; done
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
new file mode 100644
index 000000000..41ceab2b5
--- /dev/null
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+set -euo pipefail
+
+function teardown_cmd() {
+ a=( $@ )
+ local name=$1
+ a=( "${a[@]:1}" )
+ local flag=0
+ local found=
+ for i in ${a[@]}; do
+ if [[ "$i" == "--"* ]]; then
+ found=true
+ break
+ fi
+ flag=$((flag+1))
+ done
+ if [[ -z "${found}" ]]; then
+ flag=$((flag+1))
+ fi
+ if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
+ fi
+}
+
+function teardown() {
+ for i in `seq 1 20`; do
+ if teardown_cmd $@; then
+ break
+ fi
+ sleep 0.5
+ done
+}
+
+# Preemptively spin down the instances
+{% for node_group in provision_gce_node_groups %}
+# scale down {{ node_group.name }}
+(
+ # performs a delete and scale down as one operation to ensure maximum parallelism
+ if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then
+ exit 0
+ fi
+ instances="${instances%?}"
+ if [[ -z "${instances}" ]]; then
+ echo "warning: No instances in {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then
+ echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+) &
+{% endfor %}
+
+# Bucket for registry
+(
+if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+fi
+) &
+
+# DNS
+(
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ # Retry DNS changes until they succeed since this may be a shared resource
+ while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f "${dns}"
+
+ # export all dns records that match into a zone format, and turn each line into a set of args for
+ # record-sets transaction.
+ gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}"
+ if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
+ awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
+ then
+ rm -f "${dns}"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ rc=$?
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+ continue
+ fi
+ exit $rc
+ fi
+ fi
+ rm "${dns}.input"
+ break
+ done
+fi
+) &
+
+(
+# Router network rules
+teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+
+# Internal master network rules
+teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+) &
+
+(
+# Master SSL network rules
+teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
+teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies
+teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global
+teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global
+teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks
+) &
+
+# Firewall rules
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+declare -A FW_RULES=(
+ ['icmp']=""
+ ['ssh-external']=""
+ ['ssh-internal']=""
+ ['master-internal']=""
+ ['master-external']=""
+ ['node-internal']=""
+ ['infra-node-internal']=""
+ ['infra-node-external']=""
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ # retry a few times because this call can be flaky
+ for i in `seq 1 3`; do
+ if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then
+ break
+ fi
+ done
+ fi ) &
+done
+
+for i in `jobs -p`; do wait $i; done
+
+{% for node_group in provision_gce_node_groups %}
+# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed
+(
+ teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}"
+ teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+# Network
+teardown "{{ gce_network_name }}" compute networks
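For illustration, the argument splitting inside teardown_cmd() above can be sketched in
Python (this is not part of the patch; the sample resource name is made up and the
--project flag is omitted for brevity). Everything before the first "--" flag is treated
as the gcloud subcommand, and describe/delete plus the resource name are spliced in
between the subcommand and its flags:

    # Standalone sketch of teardown_cmd's argument handling (sample values only).
    args = ["master-ssl-lb-rule", "compute", "forwarding-rules", "--global"]
    name, rest = args[0], args[1:]
    split = next((i for i, arg in enumerate(rest) if arg.startswith("--")), len(rest))
    describe_cmd = ["gcloud"] + rest[:split] + ["describe", name] + rest[split:]
    delete_cmd = ["gcloud"] + rest[:split] + ["delete", "-q", name] + rest[split:]
    print(" ".join(describe_cmd))  # gcloud compute forwarding-rules describe master-ssl-lb-rule --global
    print(" ".join(delete_cmd))    # gcloud compute forwarding-rules delete -q master-ssl-lb-rule --global
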
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp_image_prep/files/partition.conf
new file mode 100644
index 000000000..b87e5e0b6
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/files/partition.conf
@@ -0,0 +1,3 @@
+[Service]
+ExecStartPost=-/usr/bin/growpart /dev/sda 1
+ExecStartPost=-/sbin/xfs_growfs /
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp_image_prep/tasks/main.yaml
new file mode 100644
index 000000000..fee5ab618
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/tasks/main.yaml
@@ -0,0 +1,18 @@
+---
+# GCE instances start out with XFS mounted with barrier=1, an option that is only valid for extfs.
+- name: Remove barrier=1 from XFS fstab entries
+ lineinfile:
+ path: /etc/fstab
+ regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
+    line: '\1xfs\2 \3'
+ backrefs: yes
+
+- name: Ensure the root filesystem has XFS group quota turned on
+ lineinfile:
+ path: /boot/grub2/grub.cfg
+ regexp: '^(.*)linux16 (.*)$'
+ line: '\1linux16 \2 rootflags=gquota'
+ backrefs: yes
+
+- name: Ensure the root partition grows on startup
+ copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
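As a rough illustration (not part of the patch), the regexp/backrefs pair in the first
task above behaves like the following re.sub call; lineinfile expands the backreferences
with Python's re module, and the fstab line shown here is a made-up sample:

    import re

    fstab_line = "UUID=0000-0000 / xfs defaults,barrier=1 0 0"  # made-up sample entry
    print(re.sub(r'^(.+)xfs(.+?),?barrier=1,?(.*?)$', r'\1xfs\2 \3', fstab_line))
    # prints "UUID=0000-0000 / xfs defaults  0 0" -- the barrier=1 option is dropped
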
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 8d35db6b5..326176273 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -3,7 +3,10 @@ Ansible action plugin to execute health checks in OpenShift clusters.
"""
import sys
import os
+import base64
import traceback
+import errno
+import json
from collections import defaultdict
from ansible.plugins.action import ActionBase
@@ -38,8 +41,13 @@ class ActionModule(ActionBase):
# storing the information we need in the result.
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
+ # if the user wants to write check results to files, they provide this directory:
+ output_dir = task_vars.get("openshift_checks_output_dir")
+ if output_dir:
+ output_dir = os.path.join(output_dir, task_vars["ansible_host"])
+
try:
- known_checks = self.load_known_checks(tmp, task_vars)
+ known_checks = self.load_known_checks(tmp, task_vars, output_dir)
args = self._task.args
requested_checks = normalize(args.get('checks', []))
@@ -65,21 +73,20 @@ class ActionModule(ActionBase):
for name in resolved_checks:
display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"]))
- check = known_checks[name]
- check_results[name] = run_check(name, check, user_disabled_checks)
- if check.changed:
- check_results[name]["changed"] = True
+ check_results[name] = run_check(name, known_checks[name], user_disabled_checks, output_dir)
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
result["failed"] = True
result["msg"] = "One or more checks failed"
+ write_result_to_output_dir(output_dir, result)
return result
- def load_known_checks(self, tmp, task_vars):
+ def load_known_checks(self, tmp, task_vars, output_dir=None):
"""Find all existing checks and return a mapping of names to instances."""
load_checks()
+ want_full_results = bool(output_dir)
known_checks = {}
for cls in OpenShiftCheck.subclasses():
@@ -90,7 +97,12 @@ class ActionModule(ActionBase):
"duplicate check name '{}' in: '{}' and '{}'"
"".format(name, full_class_name(cls), full_class_name(other_cls))
)
- known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
+ known_checks[name] = cls(
+ execute_module=self._execute_module,
+ tmp=tmp,
+ task_vars=task_vars,
+ want_full_results=want_full_results
+ )
return known_checks
@@ -185,9 +197,11 @@ def normalize(checks):
return [name.strip() for name in checks if name.strip()]
-def run_check(name, check, user_disabled_checks):
+def run_check(name, check, user_disabled_checks, output_dir=None):
"""Run a single check if enabled and return a result dict."""
- if name in user_disabled_checks:
+
+ # determine if we're going to run the check (not inactive or disabled)
+ if name in user_disabled_checks or '*' in user_disabled_checks:
return dict(skipped=True, skipped_reason="Disabled by user request")
# pylint: disable=broad-except; capturing exceptions broadly is intentional,
@@ -201,12 +215,134 @@ def run_check(name, check, user_disabled_checks):
if not is_active:
return dict(skipped=True, skipped_reason="Not active for this host")
+ # run the check
+ result = {}
try:
- return check.run()
+ result = check.run()
except OpenShiftCheckException as exc:
- return dict(failed=True, msg=str(exc))
+ check.register_failure(exc)
+ except Exception as exc:
+ check.register_failure("\n".join([str(exc), traceback.format_exc()]))
+
+ # process the check state; compose the result hash, write files as needed
+ if check.changed:
+ result["changed"] = True
+ if check.failures or result.get("failed"):
+ if "msg" in result: # failure result has msg; combine with any registered failures
+ check.register_failure(result.get("msg"))
+ result["failures"] = [(fail.name, str(fail)) for fail in check.failures]
+ result["failed"] = True
+ result["msg"] = "\n".join(str(fail) for fail in check.failures)
+ write_to_output_file(output_dir, name + ".failures.json", result["failures"])
+ if check.logs:
+ write_to_output_file(output_dir, name + ".log.json", check.logs)
+ if check.files_to_save:
+ write_files_to_save(output_dir, check)
+
+ return result
+
+
+def prepare_output_dir(dirname):
+ """Create the directory, including parents. Return bool for success/failure."""
+ try:
+ os.makedirs(dirname)
+ return True
+ except OSError as exc:
+        # creating a directory that already exists raises EEXIST, which is fine;
+        # any other error means the directory could not be created
+ return exc.errno == errno.EEXIST
+
+
+def copy_remote_file_to_dir(check, file_to_save, output_dir, fname):
+ """Copy file from remote host to local file in output_dir, if given."""
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ local_file = os.path.join(output_dir, fname)
+
+ # pylint: disable=broad-except; do not need to do anything about failure to write dir/file
+ # and do not want exceptions to break anything.
+ try:
+ # NOTE: it would have been nice to copy the file directly without loading it into
+ # memory, but there does not seem to be a good way to do this via ansible.
+ result = check.execute_module("slurp", dict(src=file_to_save), register=False)
+ if result.get("failed"):
+ display.warning("Could not retrieve file {}: {}".format(file_to_save, result.get("msg")))
+ return
+
+ content = result["content"]
+ if result.get("encoding") == "base64":
+ content = base64.b64decode(content)
+ with open(local_file, "wb") as outfile:
+ outfile.write(content)
+ except Exception as exc:
+ display.warning("Failed writing remote {} to local {}: {}".format(file_to_save, local_file, exc))
+ return
+
+
+def _no_fail(obj):
+ # pylint: disable=broad-except; do not want serialization to fail for any reason
+ try:
+ return str(obj)
+ except Exception:
+ return "[not serializable]"
+
+
+def write_to_output_file(output_dir, filename, data):
+ """If output_dir provided, write data to file. Serialize as JSON if data is not a string."""
+
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ filename = os.path.join(output_dir, filename)
+ try:
+ with open(filename, 'w') as outfile:
+ if isinstance(data, string_types):
+ outfile.write(data)
+ else:
+ json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail)
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
+ except Exception as exc:
+ display.warning("Could not write output file {}: {}".format(filename, exc))
+
+
+def write_result_to_output_dir(output_dir, result):
+ """If output_dir provided, write the result as json to result.json.
+
+ Success/failure of the write is recorded as "output_files" in the result hash afterward.
+ Otherwise this is much like write_to_output_file.
+ """
+
+ if not output_dir:
+ return
+ if not prepare_output_dir(output_dir):
+ result["output_files"] = "Error creating output directory " + output_dir
+ return
+
+ filename = os.path.join(output_dir, "result.json")
+ try:
+ with open(filename, 'w') as outfile:
+ json.dump(result, outfile, sort_keys=True, indent=4, default=_no_fail)
+ result["output_files"] = "Check results for this host written to " + filename
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
except Exception as exc:
- return dict(failed=True, msg=str(exc), exception=traceback.format_exc())
+ result["output_files"] = "Error writing check results to {}:\n{}".format(filename, exc)
+
+
+def write_files_to_save(output_dir, check):
+ """Write files to check subdir in output dir."""
+ if not output_dir:
+ return
+ output_dir = os.path.join(output_dir, check.name)
+ seen_file = defaultdict(lambda: 0)
+ for file_to_save in check.files_to_save:
+ fname = file_to_save.filename
+ while seen_file[fname]: # just to be sure we never re-write a file, append numbers as needed
+ seen_file[fname] += 1
+ fname = "{}.{}".format(fname, seen_file[fname])
+ seen_file[fname] += 1
+ if file_to_save.remote_filename:
+ copy_remote_file_to_dir(check, file_to_save.remote_filename, output_dir, fname)
+ else:
+ write_to_output_file(output_dir, fname, file_to_save.contents)
def full_class_name(cls):
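When openshift_checks_output_dir is set, the plugin writes everything under
<output_dir>/<ansible_host>/: result.json for the overall run, <check>.failures.json and
<check>.log.json per check, and a <check>/ subdirectory for files the check registers.
A standalone sketch (not part of the patch, with made-up filenames) of the
collision-avoidance naming used by write_files_to_save() above:

    from collections import defaultdict

    requested = ["docker_info.json", "docker_info.json", "docker_info.json"]  # made-up names
    seen_file = defaultdict(lambda: 0)
    for fname in requested:
        while seen_file[fname]:
            seen_file[fname] += 1
            fname = "{}.{}".format(fname, seen_file[fname])
        seen_file[fname] += 1
        print(fname)
    # prints docker_info.json, docker_info.json.2, docker_info.json.3
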
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 349655966..dcaf87eca 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -10,6 +10,7 @@ import traceback
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
from ansible.utils.color import stringc
+from ansible.module_utils.six import string_types
FAILED_NO_MSG = u'Failed without returning a message.'
@@ -140,11 +141,19 @@ def deduplicate_failures(failures):
Returns a new list of failures such that identical failures from different
hosts are grouped together in a single entry. The relative order of failures
is preserved.
+
+    If any failure contains unhashable values, the original list of failures is returned.
"""
groups = defaultdict(list)
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
- groups[group_key].append(failure)
+ try:
+ groups[group_key].append(failure)
+ except TypeError:
+ # abort and return original list of failures when failures has an
+ # unhashable type.
+ return failures
+
result = []
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
@@ -159,7 +168,10 @@ def format_failure(failure):
"""Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
by a newline separator when output to the user."""
- host = u', '.join(failure['host'])
+ if isinstance(failure['host'], string_types):
+ host = failure['host']
+ else:
+ host = u', '.join(failure['host'])
play = failure['play']
task = failure['task']
msg = failure['msg']
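For context, the grouping key that deduplicate_failures() builds excludes only the 'host'
key, which is why identical failures from different hosts collapse into one entry and why
an unhashable value (for example a list in 'msg') now triggers the TypeError fallback.
A minimal sketch with a made-up failure entry (not part of the patch):

    failure = {"host": "master1", "play": "health checks", "task": "run checks", "msg": "disk too small"}
    group_key = tuple(sorted((key, value) for key, value in failure.items() if key != "host"))
    print(group_key)
    # if any value were a list, using group_key as a dict key would raise TypeError,
    # which is the case the new fallback handles by returning the failures unchanged
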
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index c8769b511..db3c0b654 100644
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -26,15 +26,13 @@ from ansible.module_utils.six import string_types
YUM_IMPORT_EXCEPTION = None
DNF_IMPORT_EXCEPTION = None
-PKG_MGR = None
try:
import yum # pylint: disable=import-error
- PKG_MGR = "yum"
except ImportError as err:
YUM_IMPORT_EXCEPTION = err
+
try:
import dnf # pylint: disable=import-error
- PKG_MGR = "dnf"
except ImportError as err:
DNF_IMPORT_EXCEPTION = err
@@ -51,14 +49,19 @@ def main():
module = AnsibleModule(
argument_spec=dict(
package_list=dict(type="list", required=True),
+ package_mgr=dict(type="str", required=True),
),
supports_check_mode=True
)
- if YUM_IMPORT_EXCEPTION and DNF_IMPORT_EXCEPTION:
+ # determine the package manager to use
+ package_mgr = module.params['package_mgr']
+ if package_mgr not in ('yum', 'dnf'):
+ module.fail_json(msg="package_mgr must be one of: yum, dnf")
+ pkg_mgr_exception = dict(yum=YUM_IMPORT_EXCEPTION, dnf=DNF_IMPORT_EXCEPTION)[package_mgr]
+ if pkg_mgr_exception:
module.fail_json(
- msg="aos_version module could not import yum or dnf: %s %s" %
- (YUM_IMPORT_EXCEPTION, DNF_IMPORT_EXCEPTION)
+ msg="aos_version module could not import {}: {}".format(package_mgr, pkg_mgr_exception)
)
# determine the packages we will look for
@@ -78,7 +81,7 @@ def main():
# get the list of packages available and complain if anything is wrong
try:
- pkgs = _retrieve_available_packages(expected_pkg_names)
+ pkgs = _retrieve_available_packages(package_mgr, expected_pkg_names)
if versioned_pkgs:
_check_precise_version_found(pkgs, _to_dict(versioned_pkgs))
_check_higher_version_found(pkgs, _to_dict(versioned_pkgs))
@@ -93,7 +96,7 @@ def _to_dict(pkg_list):
return {pkg["name"]: pkg for pkg in pkg_list}
-def _retrieve_available_packages(expected_pkgs):
+def _retrieve_available_packages(pkg_mgr, expected_pkgs):
# The openshift excluder prevents unintended updates to openshift
# packages by setting yum excludes on those packages. See:
# https://wiki.centos.org/SpecialInterestGroup/PaaS/OpenShift-Origin-Control-Updates
@@ -103,14 +106,15 @@ def _retrieve_available_packages(expected_pkgs):
# be excluded. So, for our purposes here, disable excludes to see
# what will really be available during an install or upgrade.
- if PKG_MGR == "yum":
+ if pkg_mgr == "yum":
# search for package versions available for openshift pkgs
yb = yum.YumBase() # pylint: disable=invalid-name
yb.conf.disable_excludes = ['all']
try:
- pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
+ pkgs = yb.rpmdb.returnPackages(patterns=expected_pkgs)
+ pkgs += yb.pkgSack.returnPackages(patterns=expected_pkgs)
except yum.Errors.PackageSackError as excinfo:
# you only hit this if *none* of the packages are available
raise AosVersionException('\n'.join([
@@ -118,7 +122,7 @@ def _retrieve_available_packages(expected_pkgs):
'Check your subscription and repo settings.',
str(excinfo),
]))
- elif PKG_MGR == "dnf":
+ elif pkg_mgr == "dnf":
dbase = dnf.Base() # pyling: disable=invalid-name
dbase.conf.disable_excludes = ['all']
@@ -127,8 +131,11 @@ def _retrieve_available_packages(expected_pkgs):
dquery = dbase.sack.query()
aquery = dquery.available()
+ iquery = dquery.installed()
- pkgs = list(aquery.filter(name=expected_pkgs))
+ available_pkgs = list(aquery.filter(name=expected_pkgs))
+ installed_pkgs = list(iquery.filter(name=expected_pkgs))
+ pkgs = available_pkgs + installed_pkgs
if not pkgs:
# pkgs list is empty, raise because no expected packages found
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 02ee1d0f9..28cb53cc5 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -2,8 +2,11 @@
Health checks for OpenShift clusters.
"""
+import json
import operator
import os
+import time
+import collections
from abc import ABCMeta, abstractmethod, abstractproperty
from importlib import import_module
@@ -27,7 +30,7 @@ class OpenShiftCheckException(Exception):
class OpenShiftCheckExceptionList(OpenShiftCheckException):
- """A container for multiple logging errors that may be detected in one check."""
+ """A container for multiple errors that may be detected in one check."""
def __init__(self, errors):
self.errors = errors
super(OpenShiftCheckExceptionList, self).__init__(
@@ -40,26 +43,53 @@ class OpenShiftCheckExceptionList(OpenShiftCheckException):
return self.errors[index]
+FileToSave = collections.namedtuple("FileToSave", "filename contents remote_filename")
+
+
+# pylint: disable=too-many-instance-attributes; all represent significantly different state.
+# Arguably they could be separated into two hashes, one for storing parameters, and one for
+# storing result state; but that smells more like clutter than clarity.
@six.add_metaclass(ABCMeta)
class OpenShiftCheck(object):
- """
- A base class for defining checks for an OpenShift cluster environment.
+ """A base class for defining checks for an OpenShift cluster environment.
- Expect optional params: method execute_module, dict task_vars, and string tmp.
+ Optional init params: method execute_module, dict task_vars, and string tmp
execute_module is expected to have a signature compatible with _execute_module
from ansible plugins/action/__init__.py, e.g.:
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args):
This is stored so that it can be invoked in subclasses via check.execute_module("name", args)
which provides the check's stored task_vars and tmp.
+
+ Optional init param: want_full_results
+ If the check can gather logs, tarballs, etc., do so when True; but no need to spend
+ the time if they're not wanted (won't be written to output directory).
"""
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
+ def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False):
+ # store a method for executing ansible modules from the check
self._execute_module = execute_module
+ # the task variables and tmpdir passed into the health checker task
self.task_vars = task_vars or {}
self.tmp = tmp
+ # a boolean for disabling the gathering of results (files, computations) that won't
+ # actually be recorded/used
+ self.want_full_results = want_full_results
+
+ # mainly for testing purposes; see execute_module_with_retries
+ self._module_retries = 3
+ self._module_retry_interval = 5 # seconds
+ # state to be recorded for inspection after the check runs:
+ #
# set to True when the check changes the host, for accurate total "changed" count
self.changed = False
+ # list of OpenShiftCheckException for check to report (alternative to returning a failed result)
+ self.failures = []
+ # list of FileToSave - files the check specifies to be written locally if so configured
+ self.files_to_save = []
+ # log messages for the check - tuples of (description, msg) where msg is serializable.
+ # These are intended to be a sequential record of what the check observed and determined.
+ self.logs = []
@abstractproperty
def name(self):
@@ -82,7 +112,13 @@ class OpenShiftCheck(object):
@abstractmethod
def run(self):
- """Executes a check, normally implemented as a module."""
+ """Executes a check against a host and returns a result hash similar to Ansible modules.
+
+        The direction going forward is to record state in the check's attributes rather
+        than build a result hash: return an empty hash and let the action plugin fill it
+        in, or raise an OpenShiftCheckException. Returning a populated hash may be
+        deprecated if it proves unnecessary.
+ """
return {}
@classmethod
@@ -94,7 +130,43 @@ class OpenShiftCheck(object):
for subclass in subclass.subclasses():
yield subclass
- def execute_module(self, module_name=None, module_args=None):
+ def register_failure(self, error):
+ """Record in the check that a failure occurred.
+
+        Recorded failures are merged into the result hash for now. They are also written to the
+        output directory (if one was provided) as <check>.failures.json and recorded as a log entry
+        so that they appear in <check>.log.json as well.
+ """
+ # It should be an exception; make it one if not
+ if not isinstance(error, OpenShiftCheckException):
+ error = OpenShiftCheckException(str(error))
+ self.failures.append(error)
+ # duplicate it in the logs so it can be seen in the context of any
+ # information that led to the failure
+ self.register_log("failure: " + error.name, str(error))
+
+ def register_log(self, context, msg):
+ """Record an entry for the check log.
+
+        Log entries are intended to provide context for the whole sequence of what the check observed.
+        They are saved as an ordered list in a local check log file.
+        They are not included in the result or in the ansible log; they exist only for the record.
+ """
+ self.logs.append([context, msg])
+
+ def register_file(self, filename, contents=None, remote_filename=""):
+ """Record a file that a check makes available to be saved individually to output directory.
+
+ Either file contents should be passed in, or a file to be copied from the remote host
+        should be specified. Contents that are not a string will be serialized as JSON.
+
+        NOTE: When copying a file from the remote host, it is slurped into memory as base64, so
+        avoid using this on very large files (more than, say, 10M).
+ """
+ if contents is None and not remote_filename:
+ raise OpenShiftCheckException("File data/source not specified; this is a bug in the check.")
+ self.files_to_save.append(FileToSave(filename, contents, remote_filename))
+
+ def execute_module(self, module_name=None, module_args=None, save_as_name=None, register=True):
"""Invoke an Ansible module from a check.
Invoke stored _execute_module, normally copied from the action
@@ -106,6 +178,12 @@ class OpenShiftCheck(object):
Ansible version).
So e.g. check.execute_module("foo", dict(arg1=...))
+
+ save_as_name specifies a file name for saving the result to an output directory,
+ if needed, and is intended to uniquely identify the result of invoking execute_module.
+ If not provided, the module name will be used.
+        If register is set to False, the result will not be recorded in the check's logs or files to save.
+
Return: result hash from module execution.
"""
if self._execute_module is None:
@@ -113,7 +191,33 @@ class OpenShiftCheck(object):
self.__class__.__name__ +
" invoked execute_module without providing the method at initialization."
)
- return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ result = self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ if result.get("changed"):
+ self.changed = True
+ for output in ["result", "stdout"]:
+ # output is often JSON; attempt to decode
+ try:
+ result[output + "_json"] = json.loads(result[output])
+ except (KeyError, ValueError):
+ pass
+
+ if register:
+ self.register_log("execute_module: " + module_name, result)
+ self.register_file(save_as_name or module_name + ".json", result)
+ return result
+
+ def execute_module_with_retries(self, module_name, module_args):
+ """Run execute_module and retry on failure."""
+ result = {}
+ tries = 0
+ while True:
+ res = self.execute_module(module_name, module_args)
+ if tries > self._module_retries or not res.get("failed"):
+ result.update(res)
+ return result
+ result["last_failed"] = res
+ tries += 1
+ time.sleep(self._module_retry_interval)
def get_var(self, *keys, **kwargs):
"""Get deeply nested values from task_vars.
@@ -171,8 +275,12 @@ class OpenShiftCheck(object):
'There is a bug in this check. While trying to convert variable \n'
' "{var}={value}"\n'
'the given converter cannot be used or failed unexpectedly:\n'
- '{error}'.format(var=".".join(keys), value=value, error=error)
- )
+ '{type}: {error}'.format(
+ var=".".join(keys),
+ value=value,
+ type=error.__class__.__name__,
+ error=error
+ ))
@staticmethod
def get_major_minor_version(openshift_image_tag):
@@ -214,7 +322,9 @@ class OpenShiftCheck(object):
mount_point = os.path.dirname(mount_point)
try:
- return mount_for_path[mount_point]
+ mount = mount_for_path[mount_point]
+ self.register_log("mount point for " + path, mount)
+ return mount
except KeyError:
known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path))
raise OpenShiftCheckException(
@@ -242,7 +352,7 @@ def load_checks(path=None, subpkg=""):
modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
continue
- if name.endswith(".py") and not name.startswith(".") and name not in LOADER_EXCLUDES:
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
return modules
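To make the new check API concrete, here is a hypothetical check (not part of the patch)
that uses register_log, register_file, register_failure and want_full_results. It assumes
it would be placed under openshift_checks/ in this role so that load_checks() can discover
it, and the file it inspects is just an example:

    from openshift_checks import OpenShiftCheck


    class EtcHostsCheck(OpenShiftCheck):
        """Hypothetical check: confirm /etc/hosts exists and optionally save a copy."""

        name = "etc_hosts"
        tags = ["preflight"]

        def run(self):
            # the module result is logged and registered for saving automatically
            result = self.execute_module("stat", {"path": "/etc/hosts"}, save_as_name="etc_hosts_stat.json")
            if not result.get("stat", {}).get("exists"):
                # recorded failures are merged into the final result by the action plugin
                self.register_failure("/etc/hosts is missing")
                return {}
            self.register_log("etc_hosts", "file present")
            if self.want_full_results:
                # only gather the file when an output directory was requested
                self.register_file("etc_hosts.txt", remote_filename="/etc/hosts")
            return {}
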
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index f302fd14b..cdf56e959 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -70,6 +70,10 @@ class DiskAvailability(OpenShiftCheck):
# If it is not a number, then it should be a nested dict.
pass
+ self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
+ if user_config:
+ self.register_log("user-configured thresholds", user_config)
+
# TODO: as suggested in
# https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
# maybe we could support checking disk availability in paths that are
@@ -113,10 +117,7 @@ class DiskAvailability(OpenShiftCheck):
'in your Ansible inventory, and lower the recommended disk space availability\n'
'if necessary for this upgrade.').format(config_bytes)
- return {
- 'failed': True,
- 'msg': msg,
- }
+ self.register_failure(msg)
return {}
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 857a80c74..9c35f0f92 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -32,6 +32,12 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# we use python-docker-py to check local docker for images, and skopeo
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
+ skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
+
+ def __init__(self, *args, **kwargs):
+ super(DockerImageAvailability, self).__init__(*args, **kwargs)
+ # record whether we could reach a registry or not (and remember results)
+ self.reachable_registries = {}
def is_active(self):
"""Skip hosts with unsupported deployment types."""
@@ -63,13 +69,21 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
unavailable_images = set(missing_images) - set(available_images)
if unavailable_images:
- return {
- "failed": True,
- "msg": (
- "One or more required Docker images are not available:\n {}\n"
- "Configured registries: {}"
- ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)),
- }
+ registries = [
+ reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
+ for reg in registries
+ ]
+ msg = (
+ "One or more required Docker images are not available:\n {}\n"
+ "Configured registries: {}\n"
+ "Checked by: {}"
+ ).format(
+ ",\n ".join(sorted(unavailable_images)),
+ ", ".join(registries),
+ self.skopeo_img_check_command
+ )
+
+ return dict(failed=True, msg=msg)
return {}
@@ -125,31 +139,31 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def local_images(self, images):
"""Filter a list of images and return those available locally."""
- return [
- image for image in images
- if self.is_image_local(image)
- ]
+ registries = self.known_docker_registries()
+ found_images = []
+ for image in images:
+ # docker could have the image name as-is or prefixed with any registry
+ imglist = [image] + [reg + "/" + image for reg in registries]
+ if self.is_image_local(imglist):
+ found_images.append(image)
+ return found_images
def is_image_local(self, image):
"""Check if image is already in local docker index."""
result = self.execute_module("docker_image_facts", {"name": image})
- if result.get("failed", False):
- return False
-
- return bool(result.get("images", []))
+ return bool(result.get("images")) and not result.get("failed")
def known_docker_registries(self):
"""Build a list of docker registries available according to inventory vars."""
- docker_facts = self.get_var("openshift", "docker")
- regs = set(docker_facts["additional_registries"])
+ regs = list(self.get_var("openshift.docker.additional_registries", default=[]))
deployment_type = self.get_var("openshift_deployment_type")
- if deployment_type == "origin":
- regs.update(["docker.io"])
- elif "enterprise" in deployment_type:
- regs.update(["registry.access.redhat.com"])
+ if deployment_type == "origin" and "docker.io" not in regs:
+ regs.append("docker.io")
+ elif "enterprise" in deployment_type and "registry.access.redhat.com" not in regs:
+ regs.append("registry.access.redhat.com")
- return list(regs)
+ return regs
def available_images(self, images, default_registries):
"""Search remotely for images. Returns: list of images found."""
@@ -162,18 +176,35 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Use Skopeo to determine if required image exists in known registry(s)."""
registries = default_registries
- # if image already includes a registry, only use that
+ # If image already includes a registry, only use that.
+ # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
+ # registry.access.redhat.com/rhel7 as if the registry were a namespace.
+ # It's not clear that there's any way to distinguish them, but fortunately
+ # the current set of images all look like [registry/]namespace/name[:version].
if image.count("/") > 1:
registry, image = image.split("/", 1)
registries = [registry]
for registry in registries:
- args = {
- "_raw_params": "timeout 10 skopeo inspect --tls-verify=false "
- "docker://{}/{}".format(registry, image)
- }
- result = self.execute_module("command", args)
+ if registry not in self.reachable_registries:
+ self.reachable_registries[registry] = self.connect_to_registry(registry)
+ if not self.reachable_registries[registry]:
+ continue
+
+ args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
+ result = self.execute_module_with_retries("command", args)
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
+ if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
+ self.reachable_registries[registry] = False
return False
+
+ def connect_to_registry(self, registry):
+ """Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+ # test a simple TCP connection
+ host, _, port = registry.partition(":")
+ port = port or 443
+ args = dict(host=host, port=port, state="started", timeout=30)
+ result = self.execute_module("wait_for", args)
+ return result.get("rc", 0) == 0 and not result.get("failed")
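For illustration (not part of the patch; the registry and image names are made up), the
class-level skopeo command template and the host/port split used by connect_to_registry()
behave as follows:

    skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
    print(skopeo_img_check_command.format(registry="registry.example.com:5000", image="openshift3/ose-pod:v3.6.0"))

    for registry in ("registry.example.com:5000", "docker.io"):
        host, _, port = registry.partition(":")
        port = port or 443  # wait_for probes the HTTPS port when none is given
        print(host, port)
    # prints registry.example.com 5000, then docker.io 443
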
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 7fc843fd7..986a01f38 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -72,7 +72,7 @@ class Elasticsearch(LoggingCheck):
for pod_name in pods_by_name.keys():
# Compare what each ES node reports as master and compare for split brain
get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
- master_name_str = self.exec_oc(get_master_cmd, [])
+ master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json")
master_names = (master_name_str or '').split(' ')
if len(master_names) > 1:
es_master_names.add(master_names[1])
@@ -113,7 +113,7 @@ class Elasticsearch(LoggingCheck):
# get ES cluster nodes
node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
- cluster_node_data = self.exec_oc(node_cmd, [])
+ cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json")
try:
cluster_nodes = json.loads(cluster_node_data)['nodes']
except (ValueError, KeyError):
@@ -142,7 +142,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
- cluster_health_data = self.exec_oc(cluster_health_cmd, [])
+ cluster_health_data = self.exec_oc(cluster_health_cmd, [], save_as_name='get_es_health.json')
try:
health_res = json.loads(cluster_health_data)
if not health_res or not health_res.get('status'):
@@ -171,7 +171,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
- disk_output = self.exec_oc(df_cmd, [])
+ disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index ecd8adb64..06bdfebf6 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -78,7 +78,7 @@ class LoggingCheck(OpenShiftCheck):
"""Returns the namespace in which logging is configured to deploy."""
return self.get_var("openshift_logging_namespace", default="logging")
- def exec_oc(self, cmd_str="", extra_args=None):
+ def exec_oc(self, cmd_str="", extra_args=None, save_as_name=None):
"""
Execute an 'oc' command in the remote host.
Returns: output of command and namespace,
@@ -92,7 +92,7 @@ class LoggingCheck(OpenShiftCheck):
"extra_args": list(extra_args) if extra_args else [],
}
- result = self.execute_module("ocutil", args)
+ result = self.execute_module("ocutil", args, save_as_name=save_as_name)
if result.get("failed"):
if result['result'] == '[Errno 2] No such file or directory':
raise CouldNotUseOc(
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
index d781db649..cacdf4213 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
@@ -104,7 +104,7 @@ class LoggingIndexTime(LoggingCheck):
"https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
)
exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace(), uuid=uuid)
- result = self.exec_oc(exec_cmd, [])
+ result = self.exec_oc(exec_cmd, [], save_as_name="query_for_uuid.json")
try:
count = json.loads(result)["count"]
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index e9bae60a3..b90ebf6dd 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -36,7 +36,7 @@ class DockerHostMixin(object):
# NOTE: we would use the "package" module but it's actually an action plugin
# and it's not clear how to invoke one of those. This is about the same anyway:
- result = self.execute_module(
+ result = self.execute_module_with_retries(
self.get_var("ansible_pkg_mgr", default="yum"),
{"name": self.dependencies, "state": "present"},
)
@@ -49,5 +49,4 @@ class DockerHostMixin(object):
" {deps}\n{msg}"
).format(deps=',\n '.join(self.dependencies), msg=msg)
failed = result.get("failed", False) or result.get("rc", 0) != 0
- self.changed = result.get("changed", False)
return msg, failed
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index a86180b00..21355c2f0 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -26,7 +26,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
@staticmethod
def master_packages(rpm_prefix):
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index 1e9aecbe0..8464e8a5e 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
def run(self):
args = {"packages": []}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 8b780114f..d4aec3ed8 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -46,6 +46,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
check_multi_minor_release = deployment_type in ['openshift-enterprise']
args = {
+ "package_mgr": self.get_var("ansible_pkg_mgr"),
"package_list": [
{
"name": "openvswitch",
@@ -75,7 +76,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
],
}
- return self.execute_module("aos_version", args)
+ return self.execute_module_with_retries("aos_version", args)
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version(s) for the current OpenShift version."""
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index c109ebd24..f14887303 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -3,10 +3,12 @@ import pytest
from ansible.playbook.play_context import PlayContext
from openshift_health_check import ActionModule, resolve_checks
-from openshift_checks import OpenShiftCheckException
+from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
+from openshift_checks import OpenShiftCheckException, FileToSave
-def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False):
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
+ run_logs=None, run_files=None, changed=False, get_var_return=None):
"""Returns a new class that is compatible with OpenShiftCheck for testing."""
_name, _tags = name, tags
@@ -14,12 +16,16 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
class FakeCheck(object):
name = _name
tags = _tags or []
- changed = False
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
- pass
+ def __init__(self, **_):
+ self.changed = False
+ self.failures = []
+ self.logs = run_logs or []
+ self.files_to_save = run_files or []
def is_active(self):
+ if isinstance(is_active, Exception):
+ raise is_active
return is_active
def run(self):
@@ -28,6 +34,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
raise run_exception
return run_return
+ def get_var(*args, **_):
+ return get_var_return
+
+ def register_failure(self, exc):
+ self.failures.append(OpenShiftCheckException(str(exc)))
+ return
+
return FakeCheck
@@ -98,23 +111,33 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,
assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
-def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
- checks = [fake_check(is_active=False)]
+@pytest.mark.parametrize('is_active, skipped_reason', [
+ (False, "Not active for this host"),
+ (Exception("borked"), "exception"),
+])
+def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=is_active)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
- assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert result['checks']['fake_check'].get('skipped')
+ assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
assert not failed(result)
assert not changed(result)
assert not skipped(result)
-def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('to_disable', [
+ 'fake_check',
+ ['fake_check', 'spam'],
+ '*,spam,eggs',
+])
+def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
checks = [fake_check('fake_check', is_active=True)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
- task_vars['openshift_disable_check'] = 'fake_check'
+ task_vars['openshift_disable_check'] = to_disable
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
@@ -123,10 +146,21 @@ def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
assert not skipped(result)
+def test_action_plugin_run_list_checks(monkeypatch):
+ task = FakeTask('openshift_health_check', {'checks': []})
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
+ result = plugin.run()
+
+ assert failed(result, msg_has="Available checks")
+ assert not changed(result)
+ assert not skipped(result)
+
+
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
- check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ check_class = fake_check(run_return=check_return_value, run_files=[None])
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -140,7 +174,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -153,9 +187,9 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
- check_return_value = {'failed': True}
+ check_return_value = {'failed': True, 'msg': 'this is a failure'}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -166,24 +200,51 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
assert not skipped(result)
-def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('exc_class, expect_traceback', [
+ (OpenShiftCheckException, False),
+ (Exception, True),
+])
+def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
exception_msg = 'fake check has an exception'
- run_exception = OpenShiftCheckException(exception_msg)
+ run_exception = exc_class(exception_msg)
check_class = fake_check(run_exception=run_exception, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
assert failed(result, msg_has=['failed'])
assert changed(result['checks']['fake_check'])
assert changed(result)
assert not skipped(result)
+def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
+ check_class = fake_check(
+ run_return={},
+ run_logs=[('thing', 'note')],
+ run_files=[
+ FileToSave('save.file', 'contents', None),
+ FileToSave('save.file', 'duplicate', None),
+ FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception
+ ],
+ )
+ task_vars['openshift_checks_output_dir'] = str(tmpdir)
+ check_class.get_var = lambda self, name, **_: task_vars.get(name)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ plugin.run(tmp=None, task_vars=task_vars)
+ assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
+ assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
+
+
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -249,3 +310,38 @@ def test_resolve_checks_failure(names, all_checks, words_in_exception):
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('give_output_dir, result, expect_file', [
+ (False, None, False),
+ (True, dict(content="c3BhbQo=", encoding="base64"), True),
+ (True, dict(content="encoding error", encoding="base64"), False),
+ (True, dict(content="spam", no_encoding=None), True),
+ (True, dict(failed=True, msg="could not slurp"), False),
+])
+def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
+ check = fake_check()()
+ check.execute_module = lambda *args, **_: result
+ copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
+ assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
+
+
+def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):
+
+ class Spam(object):
+ def __str__(self):
+ raise Exception("break str")
+
+ test = {1: object(), 2: Spam()}
+ test[3] = test
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error writing" in test["output_files"]
+
+ output_dir = tmpdir.join("eggs")
+ output_dir.write("spam") # so now it's not a dir
+ write_to_output_file(str(output_dir), "somefile", "somedata")
+ assert "Could not write" in capsys.readouterr()[1]
+
+ monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error creating" in test["output_files"]
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index f4fd2dfed..9ae679b79 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -183,11 +183,12 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
ansible_mounts=ansible_mounts,
)
- result = DiskAvailability(fake_execute_module, task_vars).run()
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert result['failed']
+ assert check.failures
for chunk in 'below recommended'.split() + expect_chunks:
- assert chunk in result.get('msg', '')
+ assert chunk in str(check.failures[0])
@pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
@@ -237,11 +238,11 @@ def test_min_required_space_changes_with_upgrade_context(name, group_names, cont
)
check = DiskAvailability(fake_execute_module, task_vars)
- result = check.run()
+ check.run()
- assert result.get("failed", False) == failed
+ assert bool(check.failures) == failed
for word in extra_words:
- assert word in result.get('msg', '')
+ assert word in str(check.failures[0])
def fake_execute_module(*args):
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 8d0a53df9..6a7c16c7e 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,6 +3,23 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(),
+ ),
+ openshift_deployment_type='origin',
+ openshift_image_tag='',
+ group_names=['nodes', 'masters'],
+ )
+
+
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
("origin", True, [], True),
("openshift-enterprise", True, [], True),
@@ -15,12 +32,10 @@ from openshift_checks.docker_image_availability import DockerImageAvailability
("origin", False, ["nodes", "masters"], True),
("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_containerized, group_names, expect_active):
- task_vars = dict(
- openshift=dict(common=dict(is_containerized=is_containerized)),
- openshift_deployment_type=deployment_type,
- group_names=group_names,
- )
+def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+ task_vars['openshift_deployment_type'] = deployment_type
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['group_names'] = group_names
assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@@ -30,10 +45,10 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
(True, False),
(False, True),
])
-def test_all_images_available_locally(is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
- return {"changed": True}
+ return {}
assert module_name == "docker_image_facts"
assert 'name' in module_args
@@ -42,19 +57,9 @@ def test_all_images_available_locally(is_containerized, is_atomic):
'images': [module_args['name']],
}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=is_containerized,
- is_atomic=is_atomic,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['openshift']['common']['is_atomic'] = is_atomic
+ result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -63,53 +68,36 @@ def test_all_images_available_locally(is_containerized, is_atomic):
False,
True,
])
-def test_all_images_available_remotely(available_locally):
+def test_all_images_available_remotely(task_vars, available_locally):
def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
- return {'changed': False}
+ return {}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='v3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"]
+ task_vars['openshift_image_tag'] = 'v3.4'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ result = check.run()
assert not result.get('failed', False)
-def test_all_images_unavailable():
- def execute_module(module_name=None, *_):
- if module_name == "command":
- return {
- 'failed': True,
- }
+def test_all_images_unavailable(task_vars):
+ def execute_module(module_name=None, *args):
+ if module_name == "wait_for":
+ return {}
+ elif module_name == "command":
+ return {'failed': True}
- return {
- 'changed': False,
- }
+ return {} # docker_image_facts failure
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='latest',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["docker.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ task_vars['openshift_image_tag'] = 'latest'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual['failed']
assert "required Docker images are not available" in actual['msg']
@@ -125,62 +113,63 @@ def test_all_images_unavailable():
["dependencies can be installed via `yum`"]
),
])
-def test_skopeo_update_failure(message, extra_words):
+def test_skopeo_update_failure(task_vars, message, extra_words):
def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
"msg": message,
- "changed": False,
}
- return {'changed': False}
+ return {}
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["unknown.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual["failed"]
for word in extra_words:
assert word in actual["msg"]
-@pytest.mark.parametrize("deployment_type,registries", [
- ("origin", ["unknown.io"]),
- ("openshift-enterprise", ["registry.access.redhat.com"]),
- ("openshift-enterprise", []),
-])
-def test_registry_availability(deployment_type, registries):
+@pytest.mark.parametrize(
+ "image, registries, connection_test_failed, skopeo_failed, "
+ "expect_success, expect_registries_reached", [
+ (
+ "spam/eggs:v1", ["test.reg"],
+ True, True,
+ False,
+ {"test.reg": False},
+ ),
+ (
+ "spam/eggs:v1", ["test.reg"],
+ False, True,
+ False,
+ {"test.reg": True},
+ ),
+ (
+ "eggs.reg/spam/eggs:v1", ["test.reg"],
+ False, False,
+ True,
+ {"eggs.reg": True},
+ ),
+ ])
+def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
+ expect_success, expect_registries_reached):
def execute_module(module_name=None, *_):
- return {
- 'changed': False,
- }
+ if module_name == "wait_for":
+ return dict(msg="msg", failed=connection_test_failed)
+ elif module_name == "command":
+ return dict(msg="msg", failed=skopeo_failed)
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=registries),
- ),
- openshift_deployment_type=deployment_type,
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ check = DockerImageAvailability(execute_module, task_vars())
+ check._module_retry_interval = 0
- assert not actual.get("failed", False)
+ available = check.is_available_skopeo_image(image, registries)
+ assert available == expect_success
+ assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
@@ -257,7 +246,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
openshift_image_tag='vtest',
)
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
def test_containerized_etcd():
@@ -271,4 +260,4 @@ def test_containerized_etcd():
group_names=['etcd'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
index 09bacd9ac..3fa5e8929 100644
--- a/roles/openshift_health_checker/test/elasticsearch_test.py
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -72,7 +72,7 @@ def test_check_elasticsearch():
assert_error_in_list('NoRunningPods', excinfo.value)
# canned oc responses to match so all the checks pass
- def exec_oc(cmd, args):
+ def exec_oc(cmd, args, **_):
if '_cat/master' in cmd:
return 'name logging-es'
elif '/_nodes' in cmd:
@@ -97,7 +97,7 @@ def test_check_running_es_pods():
def test_check_elasticsearch_masters():
pods = [plain_es_pod]
- check = canned_elasticsearch(task_vars_config_base, lambda *_: plain_es_pod['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: plain_es_pod['_test_master_name_str'])
assert not check.check_elasticsearch_masters(pods_by_name(pods))
@@ -117,7 +117,7 @@ def test_check_elasticsearch_masters():
])
def test_check_elasticsearch_masters_error(pods, expect_error):
test_pods = list(pods)
- check = canned_elasticsearch(task_vars_config_base, lambda *_: test_pods.pop(0)['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: test_pods.pop(0)['_test_master_name_str'])
assert_error_in_list(expect_error, check.check_elasticsearch_masters(pods_by_name(pods)))
@@ -129,7 +129,7 @@ es_node_list = {
def test_check_elasticsearch_node_list():
- check = canned_elasticsearch(task_vars_config_base, lambda *_: json.dumps(es_node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: json.dumps(es_node_list))
assert not check.check_elasticsearch_node_list(pods_by_name([plain_es_pod]))
@@ -151,13 +151,13 @@ def test_check_elasticsearch_node_list():
),
])
def test_check_elasticsearch_node_list_errors(pods, node_list, expect_error):
- check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args, **_: json.dumps(node_list))
assert_error_in_list(expect_error, check.check_elasticsearch_node_list(pods_by_name(pods)))
def test_check_elasticsearch_cluster_health():
test_health_data = [{"status": "green"}]
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert not check.check_es_cluster_health(pods_by_name([plain_es_pod]))
@@ -175,12 +175,12 @@ def test_check_elasticsearch_cluster_health():
])
def test_check_elasticsearch_cluster_health_errors(pods, health_data, expect_error):
test_health_data = list(health_data)
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert_error_in_list(expect_error, check.check_es_cluster_health(pods_by_name(pods)))
def test_check_elasticsearch_diskspace():
- check = canned_elasticsearch(exec_oc=lambda *_: 'IUse% Use%\n 3% 4%\n')
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: 'IUse% Use%\n 3% 4%\n')
assert not check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
@@ -199,5 +199,5 @@ def test_check_elasticsearch_diskspace():
),
])
def test_check_elasticsearch_diskspace_errors(disk_data, expect_error):
- check = canned_elasticsearch(exec_oc=lambda *_: disk_data)
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: disk_data)
assert_error_in_list(expect_error, check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod])))
diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py
index 22566b295..c48ade9b8 100644
--- a/roles/openshift_health_checker/test/logging_index_time_test.py
+++ b/roles/openshift_health_checker/test/logging_index_time_test.py
@@ -102,7 +102,7 @@ def test_with_running_pods():
),
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
@@ -131,7 +131,7 @@ def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
)
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
with pytest.raises(OpenShiftCheckException) as error:
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)
@@ -139,7 +139,7 @@ def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
def test_curl_kibana_with_uuid():
- check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404}))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps({"statusCode": 404}))
check.generate_uuid = lambda: SAMPLE_UUID
assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod)
@@ -161,7 +161,7 @@ def test_curl_kibana_with_uuid():
),
], ids=lambda argval: argval[0])
def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.generate_uuid = lambda: SAMPLE_UUID
with pytest.raises(OpenShiftCheckException) as error:
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
index 789784c77..bc0c3b26c 100644
--- a/roles/openshift_health_checker/test/openshift_check_test.py
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -106,13 +106,40 @@ def test_get_var_convert(task_vars, keys, convert, expected):
assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected
-@pytest.mark.parametrize("keys, convert", [
- (("bar", "baz"), int),
- (("bar.baz"), float),
- (("foo"), "bogus"),
- (("foo"), lambda a, b: 1),
- (("foo"), lambda a: 1 / 0),
+def convert_oscexc(_):
+ raise OpenShiftCheckException("known failure")
+
+
+def convert_exc(_):
+ raise Exception("failure unknown")
+
+
+@pytest.mark.parametrize("keys, convert, expect_text", [
+ (("bar", "baz"), int, "Cannot convert"),
+ (("bar.baz",), float, "Cannot convert"),
+ (("foo",), "bogus", "TypeError"),
+ (("foo",), lambda a, b: 1, "TypeError"),
+ (("foo",), lambda a: 1 / 0, "ZeroDivisionError"),
+ (("foo",), convert_oscexc, "known failure"),
+ (("foo",), convert_exc, "failure unknown"),
])
-def test_get_var_convert_error(task_vars, keys, convert):
- with pytest.raises(OpenShiftCheckException):
+def test_get_var_convert_error(task_vars, keys, convert, expect_text):
+ with pytest.raises(OpenShiftCheckException) as excinfo:
dummy_check(task_vars).get_var(*keys, convert=convert)
+ assert expect_text in str(excinfo.value)
+
+
+def test_register(task_vars):
+ check = dummy_check(task_vars)
+
+ check.register_failure(OpenShiftCheckException("spam"))
+ assert "spam" in str(check.failures[0])
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.register_file("spam") # no file contents specified
+ assert "not specified" in str(excinfo.value)
+
+ # normally execute_module registers the result file; test disabling that
+ check._execute_module = lambda *args, **_: dict()
+ check.execute_module("eggs", module_args={}, register=False)
+ assert not check.files_to_save
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index e1bf29d2a..602f32989 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -50,7 +50,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
)
- return_value = object()
+ return_value = {} # note: check.execute_module modifies return hash contents
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'rpm_version'
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 1fe648b75..b34e8fbfc 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -49,14 +49,14 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
])
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
assert set(module_args['packages']).issuperset(must_have_packages)
assert not set(module_args['packages']).intersection(must_not_have_packages)
- return return_value
+ return {'foo': return_value}
result = PackageAvailability(execute_module, task_vars).run()
- assert result is return_value
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 06489b0d7..85d3c9cab 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -2,14 +2,14 @@ from openshift_checks.package_update import PackageUpdate
def test_package_update():
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
# empty list of packages means "generic check if 'yum update' will work"
assert module_args['packages'] == []
- return return_value
+ return {'foo': return_value}
result = PackageUpdate(execute_module).run()
- assert result is return_value
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 6054d3f3e..8564cd4db 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -5,6 +5,7 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
def task_vars_for(openshift_release, deployment_type):
return dict(
+ ansible_pkg_mgr='yum',
openshift=dict(common=dict(service_type=deployment_type)),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
@@ -27,6 +28,7 @@ def test_openshift_version_not_supported():
def test_invalid_openshift_release_format():
task_vars = dict(
+ ansible_pkg_mgr='yum',
openshift=dict(common=dict(service_type='origin')),
openshift_image_tag='v0',
openshift_deployment_type='origin',
@@ -50,7 +52,7 @@ def test_invalid_openshift_release_format():
])
def test_package_version(openshift_release):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
assert module_name == 'aos_version'
@@ -64,7 +66,7 @@ def test_package_version(openshift_release):
check = PackageVersion(execute_module, task_vars_for(openshift_release, 'origin'))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
@@ -77,7 +79,7 @@ def test_package_version(openshift_release):
])
def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'aos_version'
@@ -91,7 +93,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
check = PackageVersion(execute_module, task_vars_for(openshift_release, deployment_type))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py
index 0fc258133..69f27653c 100644
--- a/roles/openshift_health_checker/test/zz_failure_summary_test.py
+++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py
@@ -65,6 +65,21 @@ import pytest
},
],
),
+ # if a failure contains an unhashable value, it will not be deduplicated
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ ),
])
def test_deduplicate_failures(failures, deduplicated):
assert deduplicate_failures(failures) == deduplicated
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index 3e5d7f860..29ae58556 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -39,7 +39,6 @@ variables also control configuration behavior:
Dependencies
------------
-* openshift_common
* openshift_hosted_facts
Example Playbook
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index c26df3afa..712a2a591 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -5,8 +5,8 @@ r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default
r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-openshift_hosted_router_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
-openshift_hosted_registry_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
+openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
+openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
registry_volume_claim: 'registry-claim'
@@ -47,3 +47,9 @@ r_openshift_hosted_registry_os_firewall_allow:
- service: Docker Registry Port
port: 5000/tcp
cond: "{{ r_openshift_hosted_use_calico }}"
+
+# NOTE
+# r_openshift_hosted_use_calico_default may be defined external to this role.
+# openshift_use_calico, if defined, may affect other roles or play behavior.
+r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 3e424da12..48f53aef8 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -61,6 +61,14 @@
openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}"
when: openshift_push_via_dns | default(false) | bool
+- name: Update registry proxy settings for dc/docker-registry
+ set_fact:
+ openshift_hosted_registry_env_vars: "{{ {'HTTPS_PROXY': (openshift.common.https_proxy | default('')),
+ 'HTTP_PROXY': (openshift.common.http_proxy | default('')),
+ 'NO_PROXY': (openshift.common.no_proxy | default(''))}
+ | combine(openshift_hosted_registry_env_vars) }}"
+ when: (openshift.common.https_proxy | default(False)) or (openshift.common.http_proxy | default('')) != ''
+
- name: Create the registry service account
oc_serviceaccount:
name: "{{ openshift_hosted_registry_serviceaccount }}"
@@ -129,7 +137,7 @@
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
-- when: openshift_hosted_registry_wait
+- when: openshift_hosted_registry_wait | bool
block:
- name: Ensure OpenShift registry correctly rolls out (best-effort today)
command: |
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index a8a6f6fc8..434b679df 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -1,7 +1,7 @@
---
- name: Configure facts for docker-registry
set_fact:
- openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}"
+ openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index e57ed733e..2a42b5a7c 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -18,6 +18,15 @@
openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+- name: Get the certificate contents for router
+ copy:
+ backup: True
+ dest: "/etc/origin/master/{{ item | basename }}"
+ src: "{{ item }}"
+ with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
+ oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
+ when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {}
+
# This is for when we desire a cluster signed cert
# The certificate is generated and placed in master_config_dir/
- block:
@@ -43,15 +52,6 @@
# End Block
when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {}
-- name: Get the certificate contents for router
- copy:
- backup: True
- dest: "/etc/origin/master/{{ item | basename }}"
- src: "{{ item }}"
- with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
- oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
- when: not openshift_hosted_router_create_certificate | bool
-
- name: Create the router service account(s)
oc_serviceaccount:
name: "{{ item.serviceaccount }}"
@@ -94,7 +94,7 @@
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
-- when: openshift_hosted_router_wait
+- when: openshift_hosted_router_wait | bool
block:
- name: Ensure OpenShift router correctly rolls out (best-effort today)
command: |
diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml
index 044c8043c..ab07a77c1 100644
--- a/roles/openshift_hosted_logging/meta/main.yaml
+++ b/roles/openshift_hosted_logging/meta/main.yaml
@@ -1,4 +1,3 @@
---
dependencies:
- - { role: openshift_common }
- { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
index 11478263c..72754df2e 100644
--- a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
index 0e3d006a7..298f8039e 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
index 28feac4e6..dace26793 100644
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 8bf98ba41..f821efd6b 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index bbaf76c17..019d836fe 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml
index 9c12865bf..4027f524b 100644
--- a/roles/openshift_hosted_templates/meta/main.yml
+++ b/roles/openshift_hosted_templates/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- role: openshift_common
+dependencies: []
diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index 41a2b12a2..239b16427 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -24,4 +24,10 @@ r_openshift_loadbalancer_os_firewall_allow:
port: "{{ openshift_master_api_port | default(8443) }}/tcp"
- service: nuage mon
port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp"
- cond: "{{ openshift_use_nuage | default(false) | bool }}"
+ cond: "{{ r_openshift_lb_use_nuage | bool }}"
+
+# NOTE
+# r_openshift_lb_use_nuage_default may be defined external to this role.
+# openshift_use_nuage, if defined, may affect other roles or play behavior.
+r_openshift_lb_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+r_openshift_lb_use_nuage: "{{ r_openshift_lb_use_nuage_default }}"
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index a77df9986..de5e25061 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -134,6 +134,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -165,6 +166,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index 6431f86d9..e74918a40 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -44,6 +44,8 @@ spec:
cpu: "{{curator_cpu_limit}}"
{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+ requests:
+ memory: "{{curator_memory_limit}}"
{% endif %}
env:
-
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
index 97525479e..95bf462d1 100644
--- a/roles/openshift_logging_curator/vars/main.yml
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_curator_version: "3_5"
-__allowed_curator_versions: ["3_5", "3_6"]
+__latest_curator_version: "3_6"
+__allowed_curator_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 0c06a7677..65b08d970 100644
--- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -24,7 +24,8 @@ network:
cloud:
kubernetes:
- service: ${SERVICE_DNS}
+ pod_label: ${POD_LABEL}
+ pod_port: 9300
namespace: ${NAMESPACE}
discovery:
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index cbe6b89f2..3c8f390c4 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -48,7 +48,7 @@ spec:
cpu: "{{es_cpu_limit}}"
{% endif %}
requests:
- memory: "512Mi"
+ memory: "{{es_memory_limit}}"
ports:
-
containerPort: 9200
@@ -90,6 +90,12 @@ spec:
name: "RECOVER_AFTER_TIME"
value: "{{openshift_logging_elasticsearch_recover_after_time}}"
-
+ name: "READINESS_PROBE_TIMEOUT"
+ value: "30"
+ -
+ name: "POD_LABEL"
+ value: "component={{component}}"
+ -
name: "IS_MASTER"
value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
@@ -106,6 +112,13 @@ spec:
readOnly: true
- name: elasticsearch-storage
mountPath: /elasticsearch/persistent
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/java/elasticsearch/probe/readiness.sh"
+ initialDelaySeconds: 10
+ timeoutSeconds: 30
+ periodSeconds: 5
volumes:
- name: elasticsearch
secret:
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index 20fa63543..09e2ee4d0 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -1,6 +1,6 @@
---
-__latest_es_version: "3_5"
-__allowed_es_versions: ["3_5", "3_6"]
+__latest_es_version: "3_6"
+__allowed_es_versions: ["3_5", "3_6", "3_7"]
__allowed_es_types: ["data-master", "data-client", "master", "client"]
__es_log_appenders: ['file', 'console']
__kibana_index_modes: ["unique", "shared_ops"]
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index 88e039e3f..a4afb6618 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -36,6 +36,8 @@ spec:
limits:
cpu: {{ openshift_logging_fluentd_cpu_limit }}
memory: {{ openshift_logging_fluentd_memory_limit }}
+ requests:
+ memory: {{ openshift_logging_fluentd_memory_limit }}
volumeMounts:
- name: runlogjournal
mountPath: /run/log/journal
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
index ec8e565c3..92a426952 100644
--- a/roles/openshift_logging_fluentd/vars/main.yml
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -1,5 +1,5 @@
---
-__latest_fluentd_version: "3_5"
-__allowed_fluentd_versions: ["3_5", "3_6"]
+__latest_fluentd_version: "3_6"
+__allowed_fluentd_versions: ["3_5", "3_6", "3_7"]
__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
__allowed_mux_client_modes: ["minimal", "maximal"]
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 512d99d06..da1386d3e 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -46,6 +46,8 @@ spec:
{% endif %}
{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
+ requests:
+ memory: "{{ kibana_memory_limit }}"
{% endif %}
{% endif %}
env:
@@ -82,6 +84,8 @@ spec:
{% endif %}
{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
+ requests:
+ memory: "{{ kibana_proxy_memory_limit }}"
{% endif %}
{% endif %}
ports:
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
index 87b281c4b..241877a02 100644
--- a/roles/openshift_logging_kibana/vars/main.yml
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_kibana_version: "3_5"
-__allowed_kibana_versions: ["3_5", "3_6"]
+__latest_kibana_version: "3_6"
+__allowed_kibana_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 70afe5cee..ff18d3270 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -45,6 +45,8 @@ spec:
{% endif %}
{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
+ requests:
+ memory: "{{mux_memory_limit}}"
{% endif %}
{% endif %}
ports:
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
index 4234b74e2..e7b57f4b5 100644
--- a/roles/openshift_logging_mux/vars/main.yml
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_mux_version: "3_5"
-__allowed_mux_versions: ["3_5", "3_6"]
+__latest_mux_version: "3_6"
+__allowed_mux_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 7ccc2fc3b..f142f89f0 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -3,6 +3,9 @@ manage_iq_tasks:
- resource_kind: role
resource_name: admin
user: management-admin
+- resource_kind: role
+ resource_name: admin
+ user: system:serviceaccount:management-infra:management-admin
- resource_kind: cluster-role
resource_name: management-infra-admin
user: system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index fbf69c270..86fa57b50 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -17,7 +17,6 @@ From this role:
| Name | Default value | |
|---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------|
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
| oreg_url | UNDEF | Default docker registry to use |
| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master |
@@ -29,18 +28,10 @@ From this role:
| openshift_master_public_console_url | UNDEF | |
| openshift_master_saconfig_limit_secret_references | false | |
-From openshift_common:
-
-| Name | Default Value | |
-|-------------------------------|----------------|----------------------------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-| openshift_public_ip | UNDEF | Public IP address to use for this host |
-| openshift_hostname | UNDEF | hostname to use for this instance |
Dependencies
------------
-openshift_common
Example Playbook
----------------
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index d70106276..4c8d6fdad 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -22,5 +22,24 @@ r_openshift_master_os_firewall_allow:
oreg_url: ''
oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
-oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
+l_bind_docker_reg_auth: False
+
+# NOTE
+# r_openshift_master_*_default may be defined external to this role.
+# openshift_use_*, if defined, may affect other roles or play behavior.
+r_openshift_master_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
+r_openshift_master_use_openshift_sdn: "{{ r_openshift_master_use_openshift_sdn_default }}"
+
+r_openshift_master_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
+
+r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
+r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
+
+r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
+
+r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
+r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}"
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index bd2383f61..a657668a9 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -14,19 +14,3 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_master_facts
-- role: openshift_hosted_facts
-- role: openshift_master_certificates
-- role: openshift_etcd_client_certificates
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config | default([]) | length != 0
-- role: openshift_clock
-- role: openshift_cloud_provider
-- role: openshift_builddefaults
-- role: openshift_buildoverrides
-- role: nickhammond.logrotate
-- role: contiv
- contiv_role: netmaster
- when: openshift.common.use_contiv | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index a06defdb9..6203bfc7b 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -47,9 +47,9 @@
when:
- not openshift.common.is_containerized | bool
-- name: Create openshift.common.data_dir
+- name: Create r_openshift_master_data_dir
file:
- path: "{{ openshift.common.data_dir }}"
+ path: "{{ r_openshift_master_data_dir }}"
state: directory
mode: 0755
owner: root
@@ -169,7 +169,7 @@
register: l_already_set
- set_fact:
- openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+ openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
- name: Set fact of all etcd host IPs
openshift_facts:
@@ -177,9 +177,33 @@
local_facts:
no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
+- include: registry_auth.yml
+
- name: Install the systemd units
include: systemd_units.yml
+- name: Checking for journald.conf
+ stat: path=/etc/systemd/journald.conf
+ register: journald_conf_file
+
+- name: Update journald setup
+ replace:
+ dest: /etc/systemd/journald.conf
+ regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
+ replace: ' {{ item.var }}={{ item.val }}'
+ backup: yes
+ with_items: "{{ journald_vars_to_replace | default([]) }}"
+ when: journald_conf_file.stat.exists
+ register: journald_update
+
+# Restart journald immediately, otherwise it gets in the way during
+# further steps in Ansible
+- name: Restart journald
+ systemd:
+ name: systemd-journald
+ state: restarted
+ when: journald_update | changed
+
- name: Install Master system container
include: system_container.yml
when:
@@ -229,22 +253,6 @@
- restart master controllers
when: openshift_master_bootstrap_enabled | default(False)
-- name: Check for credentials file for registry auth
- stat:
- path: "{{oreg_auth_credentials_path }}"
- when:
- - oreg_auth_user is defined
- register: master_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart master api
- - restart master controllers
-
- include: set_loopback_context.yml
when:
- openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
new file mode 100644
index 000000000..96b6c614e
--- /dev/null
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -0,0 +1,27 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: master_oreg_auth_credentials_stat
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart master api
+ - restart master controllers
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart master api
+ - restart master controllers
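A sketch of the inventory variables the new registry_auth.yml consumes (names taken from defaults/main.yml above; the registry host and credentials are placeholders). With these set, the stat/login pair runs, and on containerized hosts l_bind_docker_reg_auth flips to True so the systemd templates bind-mount the docker config read-only:

```
# Hypothetical inventory snippet -- placeholder values
oreg_url: registry.example.com/openshift3/ose
oreg_auth_user: pull-user
oreg_auth_password: pull-password
oreg_auth_credentials_replace: true    # force a fresh `docker login` even if a config already exists
```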
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 782a35abe..7a918c57e 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -7,9 +7,16 @@
# openshift_master_config_dir is set.
- name: Set openshift_master_config_dir if unset
set_fact:
- openshift_master_config_dir: '/var/lib/origin'
+ openshift_master_config_dir: '/etc/origin/master'
when: openshift_master_config_dir is not defined
+# This play may be consumed outside the role, so we need to ensure that
+# r_openshift_master_data_dir is set.
+- name: Set r_openshift_master_data_dir if unset
+ set_fact:
+ r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+ when: r_openshift_master_data_dir is not defined
+
- name: Remove the legacy master service if it exists
include: clean_systemd_units.yml
diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml
new file mode 100644
index 000000000..1ab105808
--- /dev/null
+++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml
@@ -0,0 +1,8 @@
+---
+- yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: 'etcdClientInfo.urls'
+ value: "{{ openshift.master.etcd_urls }}"
+ notify:
+ - restart master api
+ - restart master controllers
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index e8f7c47b0..a184a59f6 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,12 +12,22 @@ Requires={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+ --name {{ openshift.common.service_type }}-master-api \
+ --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+ -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
+ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
+ --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier={{ openshift.common.service_type }}-master-api
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 69db62f16..2ded05f53 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,12 +11,22 @@ PartOf={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+ --name {{ openshift.common.service_type }}-master-controllers \
+ --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
+ --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index c14579435..d045b402b 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -106,7 +106,7 @@ etcdConfig:
clientCA: ca.crt
{% endif %}
keyFile: etcd.server.key
- storageDirectory: {{ openshift.common.data_dir }}/openshift.local.etcd
+ storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd
{% endif %}
etcdStorageConfig:
kubernetesStoragePrefix: kubernetes.io
@@ -179,8 +179,8 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.use_contiv or openshift.common.sdn_network_plugin_name == 'cni' %}
- networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+  networkPluginName: {{ r_openshift_master_sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.common.portal_net }}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 0e78d2d23..02bfd6f62 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -13,7 +13,7 @@ Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier=atomic-openshift-master-api
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index 94928f88c..e284413f7 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -17,7 +17,7 @@ Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index cf39b73f6..0c681c764 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -20,3 +20,22 @@ openshift_master_valid_grant_methods:
- deny
openshift_master_is_scaleup_host: False
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
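To make the "Update journald setup" task above concrete, this is what the replace call effectively does for the first entry of this list; a sketch only, not part of the commit:

```
# Equivalent standalone task for a single journald_vars_to_replace entry (illustration)
- replace:
    dest: /etc/systemd/journald.conf
    regexp: '^(\#| )?Storage=\s*.*?$'   # matches "#Storage=auto", " Storage=volatile", or "Storage=auto"
    replace: ' Storage=persistent'
    backup: yes
```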
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index e767772ce..56c864ec7 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -380,11 +380,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
if 'extra_authorize_parameters' in self._idp:
self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
- if 'extraAuthorizeParameters' in self._idp:
- if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
- val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
- self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
-
def validate(self):
''' validate this idp instance '''
IdentityProviderOauthBase.validate(self)
diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml
index 2e2013d40..d6756f9b9 100644
--- a/roles/openshift_metrics/tasks/pre_install.yaml
+++ b/roles/openshift_metrics/tasks/pre_install.yaml
@@ -10,7 +10,7 @@
is invalid, must be one of: emptydir, pv, dynamic
when:
- openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types
- - "not {{ openshift_metrics_heapster_standalone | bool }}"
+ - not (openshift_metrics_heapster_standalone | bool)
- name: list existing secrets
command: >
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index fb0b494da..32670b18e 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -17,22 +17,12 @@ From this role:
| Name | Default value | |
|----------------------------|-----------------------|----------------------------------------------------------|
-| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
| oreg_url | UNDEF (Optional) | Default docker registry to use |
| oreg_url_node | UNDEF (Optional) | Default docker registry to use, specifically on the node |
-From openshift_common:
-
-| Name | Default Value | |
-|-------------------------------|---------------------|---------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
-
Dependencies
------------
-openshift_common
Example Playbook
----------------
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index a7dad5b1f..433e92201 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -60,7 +60,7 @@ openshift_deployment_type: origin
openshift_node_bootstrap: False
r_openshift_node_os_firewall_deny: []
-r_openshift_node_os_firewall_allow:
+default_r_openshift_node_os_firewall_allow:
- service: Kubernetes kubelet
port: 10250/tcp
- service: http
@@ -69,18 +69,42 @@ r_openshift_node_os_firewall_allow:
port: 443/tcp
- service: OpenShift OVS sdn
port: 4789/udp
- cond: openshift.common.use_openshift_sdn | default(true) | bool
+ cond: openshift_use_openshift_sdn | bool
- service: Calico BGP Port
port: 179/tcp
- cond: "{{ openshift.common.use_calico | bool }}"
+ cond: "{{ openshift_node_use_calico }}"
- service: Kubernetes service NodePort TCP
port: "{{ openshift_node_port_range | default('') }}/tcp"
cond: "{{ openshift_node_port_range is defined }}"
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
cond: "{{ openshift_node_port_range is defined }}"
+# Allow multiple port ranges to be added to the role
+r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}"
oreg_url: ''
oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
-oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
+l_bind_docker_reg_auth: False
+
+# NOTE
+# openshift_node_*_default may be defined external to this role.
+# openshift_use_*, if defined, may affect other roles or play behavior.
+openshift_node_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
+openshift_node_use_openshift_sdn: "{{ openshift_node_use_openshift_sdn_default }}"
+
+openshift_node_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
+openshift_node_sdn_network_plugin_name: "{{ openshift_node_sdn_network_plugin_name_default }}"
+
+openshift_node_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+openshift_node_use_calico: "{{ openshift_node_use_calico_default }}"
+
+openshift_node_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
+
+openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
+openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+
+openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
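The new openshift_node_open_ports hook above is unioned into r_openshift_node_os_firewall_allow, so extra entries follow the same service/port shape; a hypothetical inventory example (port values are placeholders):

```
# Hypothetical inventory snippet
openshift_node_open_ports:
- service: ingress NodePort range
  port: 30000-32767/tcp
- service: custom metrics exporter
  port: 9100/tcp
```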
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 14ba48aba..25a6fc721 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -3,7 +3,7 @@
systemd:
name: openvswitch
state: restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool
+ when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool
register: l_openshift_node_stop_openvswitch_result
until: not l_openshift_node_stop_openvswitch_result | failed
retries: 3
@@ -29,8 +29,5 @@
- not (node_service_status_changed | default(false) | bool)
- not openshift_node_bootstrap
-- name: reload sysctl.conf
- command: /sbin/sysctl -p
-
- name: reload systemd units
command: systemctl daemon-reload
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 3db980514..ce5ecb9d0 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -15,11 +15,9 @@ dependencies:
- role: openshift_node_facts
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_common
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
when: not openshift_node_bootstrap
- role: openshift_cloud_provider
- role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index cb1440283..b83b2c452 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -42,14 +42,25 @@
path: /etc/origin/.config_managed
register: rpmgenerated_config
-- name: Remove RPM generated config files if present
- file:
- path: "/etc/origin/{{ item }}"
- state: absent
- when:
- - rpmgenerated_config.stat.exists
- - openshift_deployment_type in ['openshift-enterprise', 'atomic-enterprise']
- with_items:
- - master
- - node
- - .config_managed
+- when: rpmgenerated_config.stat.exists
+ block:
+ - name: Remove RPM generated config files if present
+ file:
+ path: "/etc/origin/{{ item }}"
+ state: absent
+ with_items:
+ - master
+
+ # with_fileglob doesn't work correctly due to a few issues.
+ # Could change this to fileglob when it gets fixed.
+ - name: find all files in /etc/origin/node so we can remove them
+ find:
+ path: /etc/origin/node/
+ register: find_results
+
+ - name: Remove everything except the resolv.conf required for node
+ file:
+ path: "{{ item.path }}"
+ state: absent
+    when: "'resolv.conf' not in item.path and 'node-dnsmasq.conf' not in item.path"
+    with_items: "{{ find_results.files }}"
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 8210fd881..2759188f3 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,18 +2,6 @@
- name: Install the systemd units
include: systemd_units.yml
-- name: Check for tuned package
- command: rpm -q tuned
- args:
- warn: no
- register: tuned_installed
- changed_when: false
- failed_when: false
-
-- name: Set atomic-guest tuned profile
- command: "tuned-adm profile atomic-guest"
- when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
-
- name: Start and enable openvswitch service
systemd:
name: openvswitch.service
@@ -22,7 +10,7 @@
daemon_reload: yes
when:
- openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | default(true) | bool
register: ovs_start_result
until: not ovs_start_result | failed
retries: 3
@@ -107,5 +95,9 @@
msg: Node failed to start please inspect the logs and try again
when: node_start_result | failed
+- name: Setup tuned
+ include: tuned.yml
+ static: yes
+
- set_fact:
node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 9bf4ed879..265bf2c46 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -1,11 +1,9 @@
---
-# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
-# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
- when: not openshift.common.is_containerized | bool
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- name: Install sdn-ovs package
@@ -13,7 +11,7 @@
name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | bool
- name: Install conntrack-tools package
package:
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 60a25dcc6..ef79b6ac0 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -49,41 +49,32 @@
state: restarted
when: openshift_use_crio | default(false)
+- name: restart NetworkManager to ensure resolv.conf is present
+ systemd:
+ name: NetworkManager
+ enabled: yes
+ state: restarted
+ when: openshift_node_bootstrap | bool
+
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
-#
-# Use lineinfile w/ a handler for this task until
-# https://github.com/ansible/ansible/pull/24277 is included in an
-# ansible release and we can use the sysctl module.
-- name: Persist net.ipv4.ip_forward sysctl entry
- lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1'
- notify:
- - reload sysctl.conf
+- sysctl:
+ name: net.ipv4.ip_forward
+ value: 1
+ sysctl_file: "/etc/sysctl.d/99-openshift.conf"
+ reload: yes
- name: include bootstrap node config
include: bootstrap.yml
when: openshift_node_bootstrap
+- include: registry_auth.yml
+
- name: include standard node config
include: config.yml
when: not openshift_node_bootstrap
-- name: Check for credentials file for registry auth
- stat:
- path: "{{oreg_auth_credentials_path }}"
- when:
- - oreg_auth_user is defined
- register: node_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart node
-
- name: Configure AWS Cloud Provider Settings
lineinfile:
dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
@@ -121,4 +112,4 @@
##### END Storage #####
- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift.common.use_openshift_sdn | default(true) | bool
+ when: openshift_node_use_openshift_sdn | default(true) | bool
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
new file mode 100644
index 000000000..f370bb260
--- /dev/null
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -0,0 +1,25 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: node_oreg_auth_credentials_stat
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart node
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart node
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 4687400cd..6b4490f61 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -26,7 +26,7 @@
- name: Install OpenvSwitch system containers
include: openvswitch_system_container.yml
when:
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | bool
- openshift.common.is_openvswitch_system_container | bool
- block:
@@ -39,7 +39,7 @@
- include: config/install-ovs-docker-service-file.yml
when:
- openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | bool
- not openshift.common.is_openvswitch_system_container | bool
- include: config/configure-node-settings.yml
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 711afcadb..7049f7189 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -21,8 +21,6 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam
- remote
container-runtime-endpoint:
- /var/run/crio.sock
- experimental-cri:
- - 'true'
image-service-endpoint:
- /var/run/crio.sock
node-labels:
@@ -39,15 +37,15 @@ masterClientConnectionOverrides:
qps: 100
{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
-{% if openshift.common.use_openshift_sdn | bool %}
-networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if openshift_node_use_openshift_sdn | bool %}
+networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
# networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.use_contiv | bool or openshift.common.sdn_network_plugin_name == 'cni' %}
- networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
+ networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
nodeIP: {{ openshift.common.ip }}
@@ -68,7 +66,7 @@ servingInfo:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
-volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
+volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 639b6f6c8..4ab10b95f 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service
After=openvswitch.service
PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift_node_use_openshift_sdn %}
Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
@@ -21,7 +21,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
+ -e HOST=/rootfs -e HOST_ETC=/host-etc \
+ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+ -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
+ -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
+ -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
+ -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
+ -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
+ -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+ -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml
index 84035b88c..d80ed1b72 100644
--- a/roles/openshift_node_dnsmasq/meta/main.yml
+++ b/roles/openshift_node_dnsmasq/meta/main.yml
@@ -12,5 +12,4 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_common
- role: openshift_node_facts
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index 4e6229bfb..5ad994df9 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -32,14 +32,12 @@ From openshift.common:
| Name | Default Value | |
|------------------------------------|---------------------|---------------------|
| openshift.common.config_base |---------------------|---------------------|
-| openshift.common.data_dir |---------------------|---------------------|
| openshift.common.hostname |---------------------|---------------------|
| openshift.common.http_proxy |---------------------|---------------------|
| openshift.common.is_atomic |---------------------|---------------------|
| openshift.common.is_containerized |---------------------|---------------------|
| openshift.common.portal_net |---------------------|---------------------|
| openshift.common.service_type |---------------------|---------------------|
-| openshift.common.use_openshift_sdn |---------------------|---------------------|
From openshift.master:
@@ -58,7 +56,7 @@ From openshift.node:
Dependencies
------------
-openshift_common
+
TODO
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index ed97d539c..3d8704308 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -1 +1,6 @@
---
+openshift_use_openshift_sdn: True
+os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+
+openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
index d31b899cf..90d80855e 100644
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ b/roles/openshift_node_upgrade/handlers/main.yml
@@ -6,7 +6,7 @@
when:
- not skip_node_svc_handlers | default(False) | bool
- not (ovs_service_status_changed | default(false) | bool)
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_use_openshift_sdn | bool
register: l_openshift_node_upgrade_stop_openvswitch_result
until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
retries: 3
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
index 2a36d8945..a810b01dc 100644
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ b/roles/openshift_node_upgrade/meta/main.yml
@@ -11,4 +11,3 @@ galaxy_info:
- 7
dependencies:
- role: lib_utils
-- role: openshift_common
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index bc092c26c..e34319186 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -44,7 +44,7 @@
changed_when: "'Downloaded newer image' in pull_result.stdout"
when:
- openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_use_openshift_sdn | bool
- include: docker/upgrade.yml
vars:
@@ -142,7 +142,7 @@
# End Disable Swap Block
- name: Reset selinux context
- command: restorecon -RF {{ openshift.common.data_dir }}/openshift.local.volumes
+ command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
when:
- ansible_selinux is defined
- ansible_selinux.status == 'enabled'
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index 4e9550150..afff2f8ba 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -4,7 +4,7 @@
# - openshift_image_tag
# - openshift.common.is_containerized
# - openshift.node.ovs_image
-# - openshift.common.use_openshift_sdn
+# - openshift_use_openshift_sdn
# - openshift.common.service_type
# - openshift.node.debug_level
# - openshift.common.config_base
@@ -28,10 +28,10 @@
when: openshift.common.is_containerized | bool
- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift.common.use_openshift_sdn | default(true) | bool
+ when: openshift_use_openshift_sdn | bool
- include: config/install-ovs-docker-service-file.yml
- when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool
+ when: openshift.common.is_containerized | bool and openshift_use_openshift_sdn | bool
- include: config/configure-node-settings.yml
- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 639b6f6c8..451412ab0 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service
After=openvswitch.service
PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift_use_openshift_sdn %}
Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
@@ -21,7 +21,7 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_persistent_volumes/README.md b/roles/openshift_persistent_volumes/README.md
index 1489cb0bd..0407d6ef1 100644
--- a/roles/openshift_persistent_volumes/README.md
+++ b/roles/openshift_persistent_volumes/README.md
@@ -17,13 +17,6 @@ From this role:
| persistent_volume_claims | [] | List of persistent volume claim dictionaries, keys: name, capacity, access_modes |
-From openshift_common:
-
-| Name | Default Value | |
-|-------------------------------|----------------|----------------------------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-
-
Dependencies
------------
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 25e5a38dd..8d3d010e4 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -10,5 +10,4 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: openshift_common
- role: openshift_hosted_facts
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
new file mode 100644
index 000000000..c5a44bffb
--- /dev/null
+++ b/roles/openshift_prometheus/README.md
@@ -0,0 +1,95 @@
+OpenShift Prometheus
+====================
+
+OpenShift Prometheus Installation
+
+Requirements
+------------
+
+
+Role Variables
+--------------
+
+For default values, see [`defaults/main.yaml`](defaults/main.yaml).
+
+- `openshift_prometheus_state`: `present` to install/update, `absent` to uninstall.
+
+- `openshift_prometheus_namespace`: project (i.e. namespace) where the components will be
+  deployed.
+
+- `openshift_prometheus_replicas`: The number of replicas for the prometheus deployment.
+
+- `openshift_prometheus_node_selector`: Selector for the nodes prometheus will be deployed on.
+
+- `openshift_prometheus_image_<COMPONENT>`: Specify the image for each component.
+
+## Storage related variables
+Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can have its PV claim configured by setting the corresponding role variables:
+```
+openshift_prometheus_<COMPONENT>_storage_type: <VALUE>
+openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
+```
+e.g.:
+```
+openshift_prometheus_storage_type: pvc
+openshift_prometheus_alertmanager_pvc_name: alertmanager
+openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
+```
+
+## Additional Alert Rules file variable
+An external file with alert rules can be added by setting its path in the additional rules variable:
+```
+openshift_prometheus_additional_rules_file: <PATH>
+```
+
+The file content should be in the Prometheus alert rules format.
+The following example sets a rule that fires an alert when one of the cluster nodes is down:
+
+```
+groups:
+- name: example-rules
+ interval: 30s # defaults to global interval
+ rules:
+ - alert: Node Down
+ expr: up{job="kubernetes-nodes"} == 0
+ annotations:
+ miqTarget: "ContainerNode"
+ severity: "HIGH"
+ message: "{{ '{{' }}{{ '$labels.instance' }}{{ '}}' }} is down"
+```
+
+
+## Additional variables to control resource limits
+Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can specify CPU and memory limits and requests by setting
+the corresponding role variables:
+```
+openshift_prometheus_<COMPONENT>_(cpu|memory)_(limit|requests): <VALUE>
+```
+e.g.:
+```
+openshift_prometheus_alertmanager_memory_limit: 1Gi
+openshift_prometheus_oauth_proxy_cpu_requests: 100m
+```
+```
+
+Dependencies
+------------
+
+openshift_facts
+
+
+Example Playbook
+----------------
+
+```
+- name: Configure openshift-prometheus
+ hosts: oo_first_master
+ roles:
+ - role: openshift_prometheus
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
new file mode 100644
index 000000000..5aa8aecec
--- /dev/null
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -0,0 +1,74 @@
+---
+# defaults file for openshift_prometheus
+openshift_prometheus_state: present
+
+openshift_prometheus_namespace: prometheus
+
+openshift_prometheus_replicas: 1
+openshift_prometheus_node_selector: {"region":"infra"}
+
+# images
+openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
+openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
+openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
+openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
+
+# additional prometheus rules file
+openshift_prometheus_additional_rules_file: null
+
+# All the required exports
+openshift_prometheus_pv_exports:
+ - prometheus
+ - prometheus-alertmanager
+ - prometheus-alertbuffer
+# PV template files and their created object names
+openshift_prometheus_pv_data:
+ - pv_name: prometheus
+ pv_template: prom-pv-server.yml
+ pv_label: Prometheus Server PV
+ - pv_name: prometheus-alertmanager
+ pv_template: prom-pv-alertmanager.yml
+ pv_label: Prometheus Alertmanager PV
+ - pv_name: prometheus-alertbuffer
+ pv_template: prom-pv-alertbuffer.yml
+ pv_label: Prometheus Alert Buffer PV
+
+# Hostname/IP of the NFS server. Currently defaults to the first host in the [nfs] group
+openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}"
+
+# storage
+openshift_prometheus_storage_type: pvc
+openshift_prometheus_pvc_name: prometheus
+openshift_prometheus_pvc_size: 10G
+openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_pvc_pv_selector: {}
+
+openshift_prometheus_alertmanager_storage_type: pvc
+openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
+openshift_prometheus_alertmanager_pvc_size: 10G
+openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_alertmanager_pvc_pv_selector: {}
+
+openshift_prometheus_alertbuffer_storage_type: pvc
+openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
+openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_alertbuffer_pvc_pv_selector: {}
+
+# container resources
+openshift_prometheus_cpu_limit: null
+openshift_prometheus_memory_limit: null
+openshift_prometheus_cpu_requests: null
+openshift_prometheus_memory_requests: null
+openshift_prometheus_alertmanager_cpu_limit: null
+openshift_prometheus_alertmanager_memory_limit: null
+openshift_prometheus_alertmanager_cpu_requests: null
+openshift_prometheus_alertmanager_memory_requests: null
+openshift_prometheus_alertbuffer_cpu_limit: null
+openshift_prometheus_alertbuffer_memory_limit: null
+openshift_prometheus_alertbuffer_cpu_requests: null
+openshift_prometheus_alertbuffer_memory_requests: null
+openshift_prometheus_oauth_proxy_cpu_limit: null
+openshift_prometheus_oauth_proxy_memory_limit: null
+openshift_prometheus_oauth_proxy_cpu_requests: null
+openshift_prometheus_oauth_proxy_memory_requests: null
diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports
new file mode 100644
index 000000000..3ccedb1fd
--- /dev/null
+++ b/roles/openshift_prometheus/files/openshift_prometheus.exports
@@ -0,0 +1,3 @@
+/exports/prometheus *(rw,no_root_squash,no_wdelay)
+/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay)
+/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_prometheus/meta/main.yaml b/roles/openshift_prometheus/meta/main.yaml
new file mode 100644
index 000000000..33188bb7e
--- /dev/null
+++ b/roles/openshift_prometheus/meta/main.yaml
@@ -0,0 +1,19 @@
+---
+galaxy_info:
+ author: OpenShift Development <dev@lists.openshift.redhat.com>
+ description: Deploy OpenShift prometheus integration for the cluster
+ company: Red Hat, Inc.
+ license: license (Apache)
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - name: Fedora
+ versions:
+ - all
+ categories:
+ - openshift
+dependencies:
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml
new file mode 100644
index 000000000..4e79da05f
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/create_pvs.yaml
@@ -0,0 +1,36 @@
+---
+# Check for existence and then conditionally:
+# - evaluate templates
+# - create PVs
+#
+# These tasks idempotently create the required Prometheus PV objects. Do not
+# call this file directly. This file is intended to be run as an
+# include that has a 'with_items' attached to it. Hence the use below
+# of variables like "{{ item.pv_label }}"
+
+- name: "Check if the {{ item.pv_label }} template has been created already"
+ oc_obj:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ state: list
+ kind: pv
+ name: "{{ item.pv_name }}"
+ register: prom_pv_check
+
+# Skip all of this if the PV already exists
+- block:
+ - name: "Ensure the {{ item.pv_label }} template is evaluated"
+ template:
+ src: "{{ item.pv_template }}.j2"
+ dest: "{{ tempdir }}/templates/{{ item.pv_template }}"
+
+ - name: "Ensure {{ item.pv_label }} is created"
+ oc_obj:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ kind: pv
+ name: "{{ item.pv_name }}"
+ state: present
+ delete_after: True
+ files:
+ - "{{ tempdir }}/templates/{{ item.pv_template }}"
+ when:
+ - not prom_pv_check.results.results.0
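As the header comment notes, create_pvs.yaml is meant to be included with a with_items loop; a minimal sketch of such a caller, assuming it iterates over openshift_prometheus_pv_data from defaults/main.yaml (the actual caller is not shown in this diff):

```
# Hypothetical driving task (illustration only)
- include: create_pvs.yaml
  with_items: "{{ openshift_prometheus_pv_data }}"
```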
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
new file mode 100644
index 000000000..a9bce2fb1
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -0,0 +1,244 @@
+---
+
+# namespace
+- name: Add prometheus project
+ oc_project:
+ state: "{{ state }}"
+ name: "{{ openshift_prometheus_namespace }}"
+ node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}"
+ description: Prometheus
+
+# secrets
+- name: Set alert and prometheus secrets
+ oc_secret:
+ state: "{{ state }}"
+ name: "{{ item }}-proxy"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ contents:
+ - path: session_secret
+ data: "{{ 43 | oo_random_word }}="
+ with_items:
+ - prometheus
+ - alerts
+
+# serviceaccount
+- name: create prometheus serviceaccount
+ oc_serviceaccount:
+ state: "{{ state }}"
+ name: prometheus
+ namespace: "{{ openshift_prometheus_namespace }}"
+  # TODO add annotations when supported
+ # annotations:
+ # serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
+ # serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
+
+ secrets:
+ - prometheus-secrets
+ changed_when: no
+
+# TODO remove this when annotations are supported by oc_serviceaccount
+- name: annotate serviceaccount
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ serviceaccount prometheus
+ serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
+ serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
+
+
+# create clusterrolebinding for prometheus serviceaccount
+- name: Set cluster-reader permissions for prometheus
+ oc_adm_policy_user:
+ state: "{{ state }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"
+
+
+######################################################################
+# NFS
+# In the case that we are not running on a cloud provider, volumes must be statically provisioned
+
+- include: nfs.yaml
+ when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
+
+
+# create prometheus and alerts services
+# TODO join into 1 task with loop
+- name: Create prometheus service
+ oc_service:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ selector:
+ app: prometheus
+ labels:
+ name: "{{ item.name }}"
+ # TODO add annotations when supported
+ # annotations:
+ # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ ports:
+ - port: 443
+ targetPort: 8443
+ with_items:
+ - name: prometheus
+
+- name: Create alerts service
+ oc_service:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ selector:
+ app: prometheus
+ labels:
+ name: "{{ item.name }}"
+ # TODO add annotations when supported
+ # annotations:
+ # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ ports:
+ - port: 443
+ targetPort: 9443
+ with_items:
+ - name: alerts
+
+
+# Annotate services with secret name
+# TODO remove this when annotations are supported by oc_service
+- name: annotate prometheus service
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ service prometheus
+ prometheus.io/scrape='true'
+ prometheus.io/scheme=https
+ service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls
+
+- name: annotate alerts service
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls'
+
+# create prometheus and alerts routes
+- name: create prometheus and alerts routes
+ oc_route:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ service_name: "{{ item.name }}"
+ tls_termination: reencrypt
+ with_items:
+ - name: prometheus
+ - name: alerts
+
+# Storage
+- name: create prometheus pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_pvc_size }}"
+ selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+
+- name: create alertmanager pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_alertmanager_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
+ selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+
+- name: create alertbuffer pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_alertbuffer_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
+ selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+
+# create prometheus deployment
+- name: Set prometheus deployment template
+ template:
+ src: prometheus_deployment.j2
+ dest: "{{ tempdir }}/templates/prometheus.yaml"
+ vars:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ prom_replicas: "{{ openshift_prometheus_replicas }}"
+
+- name: Set prometheus deployment
+ oc_obj:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ kind: deployment
+ files:
+ - "{{ tempdir }}/templates/prometheus.yaml"
+ delete_after: true
+
+# prometheus configmap
+# Copy the additional rules file if it is defined
+- name: Copy additional rules file to host
+ copy:
+ src: "{{ openshift_prometheus_additional_rules_file }}"
+ dest: "{{ tempdir }}/prometheus.additional.rules"
+ when:
+ - openshift_prometheus_additional_rules_file is defined
+ - openshift_prometheus_additional_rules_file is not none
+ - openshift_prometheus_additional_rules_file | trim | length > 0
+
+- stat:
+ path: "{{ tempdir }}/prometheus.additional.rules"
+ register: additional_rules_stat
+
+# The kubernetes version impacts the prometheus scraping endpoint,
+# so gather it before constructing the configmap.
+- name: get oc version
+ oc_version:
+ register: oc_version
+
+- set_fact:
+ kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}"
+
+- template:
+ src: prometheus.yml.j2
+ dest: "{{ tempdir }}/prometheus.yml"
+ changed_when: no
+
+- template:
+ src: prometheus.rules.j2
+ dest: "{{ tempdir }}/prometheus.rules"
+ changed_when: no
+
+# In prometheus configmap create "additional.rules" section if file exists
+- name: Set prometheus configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ prometheus.rules: "{{ tempdir }}/prometheus.rules"
+ prometheus.additional.rules: "{{ tempdir }}/prometheus.additional.rules"
+ prometheus.yml: "{{ tempdir }}/prometheus.yml"
+ when: additional_rules_stat.stat.exists == True
+
+- name: Set prometheus configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ prometheus.rules: "{{ tempdir }}/prometheus.rules"
+ prometheus.yml: "{{ tempdir }}/prometheus.yml"
+ when: additional_rules_stat.stat.exists == False
+
+# alertmanager configmap
+- template:
+ src: alertmanager.yml.j2
+ dest: "{{ tempdir }}/alertmanager.yml"
+ changed_when: no
+
+- name: Set alertmanager configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus-alerts"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ alertmanager.yml: "{{ tempdir }}/alertmanager.yml"
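
The two mutually exclusive "Set prometheus configmap" tasks above differ only in whether prometheus.additional.rules is fed to the configmap, keyed on the stat of the copied file. A minimal Python sketch of that decision (the helper name and layout are illustrative, not part of the role):

    import os

    def configmap_sources(tempdir):
        # Always ship the base rules and config; add the optional rules file
        # only when it was actually copied into the temp directory, mirroring
        # the stat result and the two "when" clauses above.
        sources = {
            "prometheus.rules": os.path.join(tempdir, "prometheus.rules"),
            "prometheus.yml": os.path.join(tempdir, "prometheus.yml"),
        }
        additional = os.path.join(tempdir, "prometheus.additional.rules")
        if os.path.exists(additional):
            sources["prometheus.additional.rules"] = additional
        return sources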
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
new file mode 100644
index 000000000..523a64334
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -0,0 +1,26 @@
+---
+
+- name: Create temp directory for doing work in on target
+ command: mktemp -td openshift-prometheus-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+- include: install_prometheus.yaml
+ vars:
+ state: "{{ openshift_prometheus_state }}"
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml
new file mode 100644
index 000000000..0b45f2cee
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/nfs.yaml
@@ -0,0 +1,44 @@
+---
+# Tasks to statically provision NFS volumes
+# Include if not using dynamic volume provisioning
+- name: Ensure the /exports/ directory exists
+ file:
+ path: /exports/
+ state: directory
+ mode: 0755
+ owner: root
+ group: root
+
+- name: Ensure the prom-pv0X export directories exist
+ file:
+ path: "/exports/{{ item }}"
+ state: directory
+ mode: 0777
+ owner: nfsnobody
+ group: nfsnobody
+ with_items: "{{ openshift_prometheus_pv_exports }}"
+
+- name: Ensure the NFS exports for Prometheus PVs exist
+ copy:
+ src: openshift_prometheus.exports
+ dest: /etc/exports.d/openshift_prometheus.exports
+ register: nfs_exports_updated
+
+- name: Ensure the NFS export table is refreshed if exports were added
+ command: exportfs -ar
+ when:
+ - nfs_exports_updated.changed
+
+
+######################################################################
+# Create the required Prometheus PVs. Check out these online docs if you
+# need a refresher on includes looping with items:
+# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
+# * http://stackoverflow.com/a/35128533
+#
+# TODO: Handle the case where a PV template is updated in
+# openshift-ansible and the change needs to be landed on the managed
+# cluster.
+
+- include: create_pvs.yaml
+ with_items: "{{ openshift_prometheus_pv_data }}"
diff --git a/roles/openshift_prometheus/templates/alertmanager.yml.j2 b/roles/openshift_prometheus/templates/alertmanager.yml.j2
new file mode 100644
index 000000000..6c432a3d0
--- /dev/null
+++ b/roles/openshift_prometheus/templates/alertmanager.yml.j2
@@ -0,0 +1,20 @@
+global:
+
+# The root route on which each incoming alert enters.
+route:
+ # default route if none match
+ receiver: alert-buffer-wh
+
+ # The labels by which incoming alerts are grouped together. For example,
+ # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
+ # be batched into a single group.
+ # TODO:
+ group_by: []
+
+ # All the above attributes are inherited by all child routes and can be
+ # overwritten on each.
+
+receivers:
+- name: alert-buffer-wh
+ webhook_configs:
+ - url: http://localhost:9099/topics/alerts
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
new file mode 100644
index 000000000..55a5e19c3
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-alertbuffer
+ labels:
+ storage: prometheus-alertbuffer
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus-alertbuffer
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
new file mode 100644
index 000000000..4ee518735
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-alertmanager
+ labels:
+ storage: prometheus-alertmanager
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus-alertmanager
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
new file mode 100644
index 000000000..933bf0f60
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus
+ labels:
+ storage: prometheus
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prometheus.rules.j2 b/roles/openshift_prometheus/templates/prometheus.rules.j2
new file mode 100644
index 000000000..e861dc127
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus.rules.j2
@@ -0,0 +1,4 @@
+groups:
+- name: example-rules
+ interval: 30s # defaults to global interval
+ rules:
diff --git a/roles/openshift_prometheus/templates/prometheus.yml.j2 b/roles/openshift_prometheus/templates/prometheus.yml.j2
new file mode 100644
index 000000000..63430f834
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus.yml.j2
@@ -0,0 +1,174 @@
+rule_files:
+ - 'prometheus.rules'
+{% if openshift_prometheus_additional_rules_file is defined and openshift_prometheus_additional_rules_file is not none %}
+ - 'prometheus.additional.rules'
+{% endif %}
+
+
+
+# A scrape configuration for running Prometheus on a Kubernetes cluster.
+# This uses separate scrape configs for cluster components (i.e. API server, node)
+# and services to allow each to use different authentication configs.
+#
+# Kubernetes labels will be added as Prometheus labels on metrics via the
+# `labelmap` relabeling action.
+
+# Scrape config for API servers.
+#
+# Kubernetes exposes API servers as endpoints to the default/kubernetes
+# service so this uses `endpoints` role and uses relabelling to only keep
+# the endpoints associated with the default/kubernetes service using the
+# default named port `https`. This works for single API server deployments as
+# well as HA API server deployments.
+scrape_configs:
+- job_name: 'kubernetes-apiservers'
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ # Keep only the default/kubernetes service endpoints for the https port. This
+ # will add a target for each API server to which Kubernetes adds an endpoint
+ # in the default/kubernetes service.
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
+
+# Scrape config for nodes.
+#
+# Each node exposes a /metrics endpoint that contains operational metrics for
+# the Kubelet and other components.
+- job_name: 'kubernetes-nodes'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ kubernetes_sd_configs:
+ - role: node
+
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+
+# Scrape config for controllers.
+#
+# Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for
+# the controllers.
+#
+# TODO: move this to a pure endpoints based metrics gatherer when controllers are exposed via
+# endpoints.
+- job_name: 'kubernetes-controllers'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ # Keep only the default/kubernetes service endpoints for the https port, and then
+ # set the port to 8444. This is the default configuration for the controllers on OpenShift
+ # masters.
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
+ - source_labels: [__address__]
+ action: replace
+ target_label: __address__
+ regex: (.+)(?::\d+)
+ replacement: $1:8444
+
+# Scrape config for cAdvisor.
+#
+# Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that
+# reports container metrics for each running pod. Scrape those by default.
+- job_name: 'kubernetes-cadvisor'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+{% if kubernetes_version | float() >= 1.7 | float() %}
+ metrics_path: /metrics/cadvisor
+{% else %}
+ metrics_path: /metrics
+{% endif %}
+
+ kubernetes_sd_configs:
+ - role: node
+
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+
+# Scrape config for service endpoints.
+#
+# The relabeling allows the actual service scrape endpoint to be configured
+# via the following annotations:
+#
+# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+# to set this to `https` & most likely set the `tls_config` of the scrape config.
+# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+# * `prometheus.io/port`: If the metrics are exposed on a different port to the
+# service then set this appropriately.
+- job_name: 'kubernetes-service-endpoints'
+
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ # TODO: this should be per target
+ insecure_skip_verify: true
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+ action: keep
+ regex: true
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+ action: replace
+ target_label: __scheme__
+ regex: (https?)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+ action: replace
+ target_label: __metrics_path__
+ regex: (.+)
+ - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+ action: replace
+ target_label: __address__
+ regex: (.+)(?::\d+);(\d+)
+ replacement: $1:$2
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_username]
+ action: replace
+ target_label: __basic_auth_username__
+ regex: (.+)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_password]
+ action: replace
+ target_label: __basic_auth_password__
+ regex: (.+)
+ - action: labelmap
+ regex: __meta_kubernetes_service_label_(.+)
+ - source_labels: [__meta_kubernetes_namespace]
+ action: replace
+ target_label: kubernetes_namespace
+ - source_labels: [__meta_kubernetes_service_name]
+ action: replace
+ target_label: kubernetes_name
+
+alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets:
+ - "localhost:9093"
diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus_deployment.j2
new file mode 100644
index 000000000..98c117f19
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus_deployment.j2
@@ -0,0 +1,240 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: prometheus
+ namespace: {{ namespace }}
+ labels:
+ app: prometheus
+spec:
+ replicas: {{ prom_replicas|default(1) }}
+ selector:
+ provider: openshift
+ matchLabels:
+ app: prometheus
+ template:
+ metadata:
+ name: prometheus
+ labels:
+ app: prometheus
+ spec:
+ serviceAccountName: prometheus
+{% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in openshift_prometheus_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ # Deploy Prometheus behind an oauth proxy
+ - name: prom-proxy
+ image: "{{ openshift_prometheus_image_proxy }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 8443
+ name: web
+ args:
+ - -provider=openshift
+ - -https-address=:8443
+ - -http-address=
+ - -email-domain=*
+ - -upstream=http://localhost:9090
+ - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
+ - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -skip-auth-regex=^/metrics
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: prometheus-tls
+ - mountPath: /etc/proxy/secrets
+ name: prometheus-secrets
+ - mountPath: /prometheus
+ name: prometheus-data
+
+ - name: prometheus
+ args:
+ - --storage.tsdb.retention=6h
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --web.listen-address=localhost:9090
+ image: "{{ openshift_prometheus_image_prometheus }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_memory_requests is defined and openshift_prometheus_memory_requests is not none %}
+ memory: "{{openshift_prometheus_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_cpu_requests is defined and openshift_prometheus_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_memory_limit is defined and openshift_prometheus_memory_limit is not none %}
+ memory: "{{ openshift_prometheus_memory_limit }}"
+{% endif %}
+{% if openshift_prometheus_cpu_limit is defined and openshift_prometheus_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_cpu_limit}}"
+{% endif %}
+
+ volumeMounts:
+ - mountPath: /etc/prometheus
+ name: prometheus-config
+ - mountPath: /prometheus
+ name: prometheus-data
+
+ # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
+ - name: alerts-proxy
+ image: "{{ openshift_prometheus_image_proxy }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 9443
+ name: web
+ args:
+ - -provider=openshift
+ - -https-address=:9443
+ - -http-address=
+ - -email-domain=*
+ - -upstream=http://localhost:9099
+ - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
+ - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: alerts-tls
+ - mountPath: /etc/proxy/secrets
+ name: alerts-secrets
+
+ - name: alert-buffer
+ args:
+ - --storage-path=/alert-buffer/messages.db
+ image: "{{ openshift_prometheus_image_alertbuffer }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_alertbuffer_memory_requests is defined and openshift_prometheus_alertbuffer_memory_requests is not none %}
+ memory: "{{openshift_prometheus_alertbuffer_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_alertbuffer_cpu_requests is defined and openshift_prometheus_alertbuffer_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_alertbuffer_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_alertbuffer_memory_limit is defined and openshift_prometheus_alertbuffer_memory_limit is not none %}
+ memory: "{{openshift_prometheus_alertbuffer_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_alertbuffer_cpu_limit is defined and openshift_prometheus_alertbuffer_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_alertbuffer_cpu_limit}}"
+{% endif %}
+ volumeMounts:
+ - mountPath: /alert-buffer
+ name: alert-buffer-data
+ ports:
+ - containerPort: 9099
+ name: alert-buf
+
+ - name: alertmanager
+ args:
+ - -config.file=/etc/alertmanager/alertmanager.yml
+ image: "{{ openshift_prometheus_image_alertmanager }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_alertmanager_memory_requests is defined and openshift_prometheus_alertmanager_memory_requests is not none %}
+ memory: "{{openshift_prometheus_alertmanager_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_alertmanager_cpu_requests is defined and openshift_prometheus_alertmanager_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_alertmanager_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_alertmanager_memory_limit is defined and openshift_prometheus_alertmanager_memory_limit is not none %}
+ memory: "{{openshift_prometheus_alertmanager_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_alertmanager_cpu_limit is defined and openshift_prometheus_alertmanager_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_alertmanager_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 9093
+ name: web
+ volumeMounts:
+ - mountPath: /etc/alertmanager
+ name: alertmanager-config
+ - mountPath: /alertmanager
+ name: alertmanager-data
+
+ restartPolicy: Always
+ volumes:
+ - name: prometheus-config
+ configMap:
+ defaultMode: 420
+ name: prometheus
+ - name: prometheus-secrets
+ secret:
+ secretName: prometheus-proxy
+ - name: prometheus-tls
+ secret:
+ secretName: prometheus-tls
+ - name: prometheus-data
+{% if openshift_prometheus_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_pvc_name }}
+{% else %}
+ emptyDir: {}
+{% endif %}
+ - name: alertmanager-config
+ configMap:
+ defaultMode: 420
+ name: prometheus-alerts
+ - name: alerts-secrets
+ secret:
+ secretName: alerts-proxy
+ - name: alerts-tls
+ secret:
+ secretName: prometheus-alerts-tls
+ - name: alertmanager-data
+{% if openshift_prometheus_alertmanager_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_alertmanager_pvc_name }}
+{% else %}
+ emptyDir: {}
+{% endif %}
+ - name: alert-buffer-data
+{% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_alertbuffer_pvc_name }}
+{% else %}
+ emptyDir: {}
+{% endif %}
diff --git a/roles/openshift_prometheus/tests/inventory b/roles/openshift_prometheus/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/roles/openshift_prometheus/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/roles/openshift_prometheus/tests/test.yaml b/roles/openshift_prometheus/tests/test.yaml
new file mode 100644
index 000000000..37baf573c
--- /dev/null
+++ b/roles/openshift_prometheus/tests/test.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - openshift_prometheus
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 59ce505d3..47d7be05a 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -12,6 +12,27 @@
deployment_type is deprecated in favor of openshift_deployment_type.
Please specify only openshift_deployment_type, or make both the same.
+# osm_cluster_network_cidr, osm_host_subnet_length and openshift_portal_net are
+# now required to avoid changes that may occur between releases
+#
+# Note: We will skip these checks when some tests run which don't
+# actually do any installing/upgrading/scaling/etc.
+# Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023
+- when:
+ - not testing_skip_some_requirements|default(False)|bool
+ assert:
+ that:
+ - "osm_cluster_network_cidr is defined"
+ - "osm_host_subnet_length is defined"
+ - "openshift_portal_net is defined"
+ msg: >
+ osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory
+ variables. If you are upgrading or scaling up, these variables should match what is currently used
+ in the cluster. If you don't remember what these values are, you can find them in
+ /etc/origin/master/master-config.yaml on a master with the names clusterNetworkCIDR
+ (osm_cluster_network_cidr), hostSubnetLength (osm_host_subnet_length),
+ and serviceNetworkCIDR (openshift_portal_net).
+
- name: Standardize on latest variable names
set_fact:
# goal is to deprecate deployment_type in favor of openshift_deployment_type.
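
The new assertion tells upgraders to read the three values back from an existing master. A hedged helper sketch (not part of the patch) that does this, assuming the keys sit under networkConfig in master-config.yaml:

    import yaml

    def required_network_vars(path="/etc/origin/master/master-config.yaml"):
        # Map the master-config.yaml keys named in the assertion message to
        # the inventory variables they correspond to.
        with open(path) as handle:
            net = yaml.safe_load(handle).get("networkConfig", {})
        return {
            "osm_cluster_network_cidr": net.get("clusterNetworkCIDR"),
            "osm_host_subnet_length": net.get("hostSubnetLength"),
            "openshift_portal_net": net.get("serviceNetworkCIDR"),
        }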
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
index 01ee2544d..7c848cb12 100644
--- a/roles/openshift_service_catalog/defaults/main.yml
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -1,3 +1,7 @@
---
openshift_service_catalog_remove: false
openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
+
+openshift_use_openshift_sdn: True
+# os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}"
+os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
index 1f25cc39f..16a307c06 100644
--- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
+++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
@@ -1,2 +1 @@
window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true;
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true;
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 64f94347b..746c73eaf 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -28,7 +28,7 @@
- name: Make kube-service-catalog project network global
command: >
oc adm pod-network make-projects-global kube-service-catalog
- when: os_sdn_network_plugin_name | default('') == 'redhat/openshift-ovs-multitenant'
+ when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'
- include: generate_certs.yml
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index a059745a6..d0bc0e028 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -76,10 +76,11 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| Name | Default value | Description |
|--------------------------------------------------|-------------------------|-----------------------------------------|
| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
@@ -91,7 +92,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service.
+| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address of the heketi service.
| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 0b3d3aef1..148549887 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -3,6 +3,7 @@ openshift_storage_glusterfs_timeout: 300
openshift_storage_glusterfs_is_native: True
openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
+openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
@@ -31,6 +32,7 @@ openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.na
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
+openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
@@ -58,9 +60,9 @@ r_openshift_storage_glusterfs_os_firewall_deny: []
r_openshift_storage_glusterfs_os_firewall_allow:
- service: glusterfs_sshd
port: "2222/tcp"
-- service: glusterfs_daemon
- port: "24007/tcp"
- service: glusterfs_management
+ port: "24007/tcp"
+- service: glusterfs_rdma
port: "24008/tcp"
- service: glusterfs_bricks
port: "49152-49251/tcp"
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
new file mode 100644
index 000000000..9ebb0d5ec
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
@@ -0,0 +1,143 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: topology
+ mountPath: ${TOPOLOGY_PATH}
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: topology
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-topology-secret
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
+- name: TOPOLOGY_PATH
+ displayName: heketi topology file location
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index a31c5bd5e..bc0dde17d 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -15,6 +15,7 @@
oc_project:
state: present
name: "{{ glusterfs_namespace }}"
+ node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}"
when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass
- name: Delete pre-existing heketi resources
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 7a2987883..012c722ff 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -5,6 +5,7 @@
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+ glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 17f87578d..1bcab8e49 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -5,6 +5,7 @@
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+ glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ }
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
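
The topology template groups nodes into heketi clusters before emitting the JSON. A rough Python equivalent of that grouping (for illustration only; hostvars is assumed to behave like a plain dict of dicts):

    def group_by_cluster(glusterfs_nodes, hostvars):
        # Bucket nodes by their optional glusterfs_cluster host variable,
        # defaulting to a single cluster named '1', as the template does.
        clusters = {}
        for node in glusterfs_nodes:
            cluster = hostvars[node].get("glusterfs_cluster", "1")
            clusters.setdefault(cluster, []).append(node)
        return clusters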
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..53d10f1f8 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,3 @@
---
openshift_protect_installed_version: True
+version_install_base_package: False
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c0ea00f34..1ff99adf8 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,6 +5,16 @@
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
+# This is only needed on masters and nodes; version_install_base_package
+# should be set by a play externally.
+- name: Install the base package for versioning
+ package:
+ name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
+ when:
+ - not is_containerized | bool
+ - version_install_base_package | bool
+
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.
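
The new task builds the RPM name from openshift.common.service_type plus an optional version suffix. A hypothetical illustration only, assuming oo_image_tag_to_rpm_version strips a leading 'v' and, with include_dash=True, prefixes the result with '-'; consult the real filter for the exact behaviour:

    def base_package_name(service_type, openshift_pkg_version=None):
        # e.g. 'origin' with no version pin, 'origin-3.7.0' with one.
        version = ""
        if openshift_pkg_version:
            version = "-" + openshift_pkg_version.lstrip("v-")
        return service_type + version

    print(base_package_name("origin"))            # -> origin
    print(base_package_name("origin", "v3.7.0"))  # -> origin-3.7.0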
diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml
index 0af5abf38..2d74f2e48 100644
--- a/roles/os_firewall/tasks/iptables.yml
+++ b/roles/os_firewall/tasks/iptables.yml
@@ -33,7 +33,7 @@
register: result
delegate_to: "{{item}}"
run_once: true
- with_items: "{{ ansible_play_hosts }}"
+ with_items: "{{ ansible_play_batch }}"
- name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
pause:
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 453044a6e..2a2cf40f3 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -41,15 +41,19 @@
redhat_subscription:
username: "{{ rhel_subscription_user }}"
password: "{{ rhel_subscription_pass }}"
+ register: rh_subscription
+ until: rh_subscription | succeeded
- name: Retrieve the OpenShift Pool ID
command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_id
+ until: openshift_pool_id | succeeded
changed_when: False
- name: Determine if OpenShift Pool Already Attached
command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_attached
+ until: openshift_pool_attached | succeeded
changed_when: False
when: openshift_pool_id.stdout == ''
@@ -59,6 +63,8 @@
- name: Attach to OpenShift Pool
command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }}
+ register: subscribe_pool
+ until: subscribe_pool | succeeded
when: openshift_pool_id.stdout != ''
- include: enterprise.yml
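
Note that these until loops rely on Ansible's defaults of three retries with a five-second delay; if subscription-manager needs more headroom, retries and delay can be set explicitly. A hedged sketch of one of the tasks with explicit values (the retry count and delay are illustrative):

- name: Retrieve the OpenShift Pool ID
  command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
  register: openshift_pool_id
  until: openshift_pool_id | succeeded
  retries: 5
  delay: 10
  changed_when: False
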
diff --git a/setup.py b/setup.py
index c0c08b4d2..eaf23d47a 100644
--- a/setup.py
+++ b/setup.py
@@ -48,6 +48,27 @@ def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
return found
+def recursive_search(search_list, field):
+    """
+    Search a list that may contain nested dicts for every value stored under
+    the given field (key). List items that are not dicts are skipped, and
+    lists nested inside dict values are searched recursively.
+    """
+    fields_found = []
+
+    for item in search_list:
+        if isinstance(item, dict):
+            for key, value in item.items():
+                if key == field:
+                    fields_found.append(value)
+                elif isinstance(value, list):
+                    results = recursive_search(value, field)
+                    for result in results:
+                        fields_found.append(result)
+
+    return fields_found
+
+
def find_entrypoint_playbooks():
'''find entry point playbooks as defined by openshift-ansible'''
playbooks = set()
@@ -248,37 +269,73 @@ class OpenShiftAnsibleSyntaxCheck(Command):
''' finalize_options '''
pass
+    def deprecate_jinja2_in_when(self, yaml_contents, yaml_file):
+        ''' Check for Jinja2 templating delimiters in when conditions '''
+        test_result = False
+        failed_items = []
+
+        search_results = recursive_search(yaml_contents, 'when')
+        for item in search_results:
+            if isinstance(item, str):
+                if '{{' in item or '{%' in item:
+                    failed_items.append(item)
+            else:
+                for sub_item in item:
+                    if '{{' in sub_item or '{%' in sub_item:
+                        failed_items.append(sub_item)
+
+        if len(failed_items) > 0:
+            print('{}Error: Usage of Jinja2 templating delimiters in when '
+                  'conditions is deprecated in Ansible 2.3.\n'
+                  ' File: {}'.format(self.FAIL, yaml_file))
+            for item in failed_items:
+                print(' Found: "{}"'.format(item))
+            print(self.ENDC)
+            test_result = True
+
+        return test_result
+
+    def deprecate_include(self, yaml_contents, yaml_file):
+        ''' Check for usage of include directive '''
+        test_result = False
+
+        search_results = recursive_search(yaml_contents, 'include')
+
+        if len(search_results) > 0:
+            print('{}Error: The `include` directive is deprecated in Ansible 2.4.\n'
+                  'https://github.com/ansible/ansible/blob/devel/CHANGELOG.md\n'
+                  ' File: {}'.format(self.FAIL, yaml_file))
+            for item in search_results:
+                print(' Found: "include: {}"'.format(item))
+            print(self.ENDC)
+            test_result = True
+
+        return test_result
+
def run(self):
''' run command '''
has_errors = False
print('Ansible Deprecation Checks')
-        exclude_dirs = ['adhoc', 'files', 'meta', 'test', 'tests', 'vars', '.tox']
+        exclude_dirs = ['adhoc', 'files', 'meta', 'test', 'tests', 'vars', 'defaults', '.tox']
         for yaml_file in find_files(
                 os.getcwd(), exclude_dirs, None, r'\.ya?ml$'):
             with open(yaml_file, 'r') as contents:
-                for task in yaml.safe_load(contents) or {}:
-                    if not isinstance(task, dict):
-                        # Skip yaml files which are not a dictionary of tasks
-                        continue
-                    if 'when' in task:
-                        if '{{' in task['when'] or '{%' in task['when']:
-                            print('{}Error: Usage of Jinja2 templating delimiters '
-                                  'in when conditions is deprecated in Ansible 2.3.\n'
-                                  ' File: {}\n'
-                                  ' Found: "{}"{}'.format(
-                                      self.FAIL, yaml_file,
-                                      task['when'], self.ENDC))
-                            has_errors = True
-                    # TODO (rteague): This test will be enabled once we move to Ansible 2.4
-                    # if 'include' in task:
-                    #     print('{}Error: The `include` directive is deprecated in Ansible 2.4.\n'
-                    #           'https://github.com/ansible/ansible/blob/devel/CHANGELOG.md\n'
-                    #           ' File: {}\n'
-                    #           ' Found: "include: {}"{}'.format(
-                    #               self.FAIL, yaml_file, task['include'], self.ENDC))
-                    #     has_errors = True
+                yaml_contents = yaml.safe_load(contents)
+                if not isinstance(yaml_contents, list):
+                    continue
+
+                # Check for Jinja2 templating delimiters in when conditions
+                result = self.deprecate_jinja2_in_when(yaml_contents, yaml_file)
+                has_errors = result or has_errors
+
+                # TODO (rteague): This test will be enabled once we move to Ansible 2.4
+                # result = self.deprecate_include(yaml_contents, yaml_file)
+                # has_errors = result or has_errors
+
+        if not has_errors:
+            print('...PASSED')
print('Ansible Playbook Entry Point Syntax Checks')
for playbook in find_entrypoint_playbooks():
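
To make the new helper's behaviour concrete, here is a small usage example of recursive_search; the task data is made up, and the printed order assumes the insertion-ordered dicts of Python 3.7+:

tasks = [
    {'name': 'outer task',
     'when': 'foo is defined',
     'block': [
         {'name': 'inner task',
          'when': ['bar | bool', 'baz == "enabled"']},
     ]},
    'plain strings are skipped because they are not dicts',
]

# Collect every value stored under the 'when' key, including values nested
# inside lists such as block/rescue/always sections.
print(recursive_search(tasks, 'when'))
# -> ['foo is defined', ['bar | bool', 'baz == "enabled"']]
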
diff --git a/test/integration/openshift_health_checker/common.go b/test/integration/openshift_health_checker/common.go
index a92d6861d..8b79c48cb 100644
--- a/test/integration/openshift_health_checker/common.go
+++ b/test/integration/openshift_health_checker/common.go
@@ -25,7 +25,7 @@ func (p PlaybookTest) Run(t *testing.T) {
// A PlaybookTest is intended to be run in parallel with other tests.
t.Parallel()
- cmd := exec.Command("ansible-playbook", "-i", "/dev/null", p.Path)
+ cmd := exec.Command("ansible-playbook", "-e", "testing_skip_some_requirements=1", "-i", "/dev/null", p.Path)
cmd.Env = append(os.Environ(), "ANSIBLE_FORCE_COLOR=1")
b, err := cmd.CombinedOutput()