-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  inventory/hosts.example | 3
-rw-r--r--  openshift-ansible.spec | 53
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 2
-rw-r--r--  playbooks/container-runtime/config.yml | 4
-rw-r--r--  playbooks/container-runtime/setup_storage.yml | 4
-rw-r--r--  playbooks/init/main.yml | 4
-rw-r--r--  playbooks/openshift-etcd/certificates.yml | 6
-rw-r--r--  playbooks/openshift-etcd/config.yml | 6
-rw-r--r--  playbooks/openshift-etcd/embedded2external.yml | 6
-rw-r--r--  playbooks/openshift-etcd/migrate.yml | 6
-rw-r--r--  playbooks/openshift-etcd/redeploy-ca.yml | 6
-rw-r--r--  playbooks/openshift-etcd/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openshift-etcd/restart.yml | 6
-rw-r--r--  playbooks/openshift-etcd/scaleup.yml | 4
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 4
-rw-r--r--  playbooks/openshift-glusterfs/config.yml | 5
-rw-r--r--  playbooks/openshift-glusterfs/registry.yml | 5
-rw-r--r--  playbooks/openshift-grafana/config.yml | 5
-rw-r--r--  playbooks/openshift-hosted/config.yml | 5
-rw-r--r--  playbooks/openshift-hosted/deploy_registry.yml | 5
-rw-r--r--  playbooks/openshift-hosted/deploy_router.yml | 5
-rw-r--r--  playbooks/openshift-hosted/redeploy-registry-certificates.yml | 5
-rw-r--r--  playbooks/openshift-hosted/redeploy-router-certificates.yml | 5
-rw-r--r--  playbooks/openshift-loadbalancer/config.yml | 5
-rw-r--r--  playbooks/openshift-logging/config.yml | 5
-rw-r--r--  playbooks/openshift-management/config.yml | 5
-rw-r--r--  playbooks/openshift-metrics/config.yml | 6
-rw-r--r--  playbooks/openshift-nfs/config.yml | 6
-rw-r--r--  playbooks/openshift-prometheus/config.yml | 6
-rw-r--r--  playbooks/openshift-provisioners/config.yml | 6
-rw-r--r--  playbooks/openshift-service-catalog/config.yml | 6
-rw-r--r--  playbooks/openshift-web-console/config.yml | 5
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 32
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/all.yml | 7
-rw-r--r--  playbooks/prerequisites.yml | 4
-rw-r--r--  roles/ansible_service_broker/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/container_runtime/defaults/main.yml | 11
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_crio.yml | 2
-rw-r--r--  roles/etcd/defaults/main.yaml | 2
-rw-r--r--  roles/kuryr/tasks/master.yaml | 4
-rw-r--r--  roles/lib_utils/filter_plugins/oo_filters.py | 48
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 3
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 3
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 7
-rw-r--r--  roles/openshift_logging_curator/vars/default_images.yml | 2
-rw-r--r--  roles/openshift_logging_curator/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 327
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml | 7
-rw-r--r--  roles/openshift_logging_eventrouter/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_logging_fluentd/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 7
-rw-r--r--  roles/openshift_logging_kibana/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 55
-rw-r--r--  roles/openshift_logging_mux/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 7
-rw-r--r--  roles/openshift_metrics/tasks/install_hawkular.yaml | 7
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 19
-rw-r--r--  roles/openshift_metrics/tasks/install_hosa.yaml | 13
-rw-r--r--  roles/openshift_metrics/vars/default_images.yml | 2
-rw-r--r--  roles/openshift_metrics/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_node/files/bootstrap.yml | 8
-rw-r--r--  roles/openshift_openstack/defaults/main.yml | 6
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 22
-rw-r--r--  roles/openshift_openstack/templates/user_data.j2 | 16
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 61
-rw-r--r--  roles/openshift_prometheus/vars/openshift-enterprise.yml | 8
-rw-r--r--  roles/openshift_provisioners/tasks/install_provisioners.yaml | 15
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 57
-rw-r--r--  roles/openshift_service_catalog/tasks/start_api_server.yml | 4
-rw-r--r--  roles/openshift_service_catalog/vars/default_images.yml | 2
-rw-r--r--  roles/openshift_service_catalog/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_web_console/tasks/install.yml | 153
-rw-r--r--  roles/openshift_web_console/vars/default_images.yml | 2
-rw-r--r--  roles/openshift_web_console/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 25
-rw-r--r--  roles/template_service_broker/vars/default_images.yml | 2
-rw-r--r--  roles/template_service_broker/vars/openshift-enterprise.yml | 2
79 files changed, 793 insertions, 397 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 1266921a6..3ab6b3fb1 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.41.0 ./
+3.9.0-0.42.0 ./
diff --git a/inventory/hosts.example b/inventory/hosts.example
index 82c588100..b2237df3c 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -934,6 +934,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Force a specific image version to use when pulling the service catalog image
#openshift_service_catalog_image_version=v3.7
+# TSB image tag
+#template_service_broker_version='v3.7'
+
# Configure one of more namespaces whose templates will be served by the TSB
#openshift_template_service_broker_namespaces=['openshift']
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index ae0104b27..531aa63b3 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.41.0%{?dist}
+Release: 0.42.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -201,6 +201,57 @@ Atomic OpenShift Utilities includes
%changelog
+* Fri Feb 09 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.42.0
+- xPaaS v1.4.8 for v3.7 (sdodson@redhat.com)
+- xPaaS v1.4.8-1 for v3.8 (sdodson@redhat.com)
+- xPaaS v1.4.8-1 for v3.9 (sdodson@redhat.com)
+- Bump xpaas version (sdodson@redhat.com)
+- Bug 1524805- CFME example now works disconnected (fabian@fabianism.us)
+- Only try to yaml.load a file if it ends in .yml or .yaml in logging facts
+ (ewolinet@redhat.com)
+- Set default image tag to openshift_image_tag for services
+ (vrutkovs@redhat.com)
+- Redeploy router certificates during upgrade only when secure.
+ (kwoodson@redhat.com)
+- GlusterFS: Fix block StorageClass heketi route (jarrpa@redhat.com)
+- changed oc to {{ openshift_client_binary }} (datarace101@gmail.com)
+- Use v3.9 web-console image for now (sdodson@redhat.com)
+- Adding ability to provide additional mounts to crio system container.
+ (kwoodson@redhat.com)
+- Remove spaces introduced at the start of the line
+ (geoff.newson@googlemail.com)
+- Changing the check for the number of etcd nodes (geoff.newson@gmail.com)
+- aws ami: make it so the tags from the orinal AMI are used with the newly
+ created AMI (mwoodson@redhat.com)
+- Setup docker excluder if requested before container_runtime is installed
+ (vrutkovs@redhat.com)
+- openshift_node: Remove master from aws node building (smilner@redhat.com)
+- Use wait_for_connection to validate ssh transport is alive
+ (sdodson@redhat.com)
+- Bug 1541625- properly cast provided ip address to unicode
+ (fabian@fabianism.us)
+- Add base package installation to upgrade playbooks (rteague@redhat.com)
+- 3.9 upgrade: fix typos in restart masters procedure (vrutkovs@redhat.com)
+- quick installer: disable broken test_get_hosts_to_run_on6 test
+ (vrutkovs@redhat.com)
+- Quick installer: run prerequistes first and update path to main playbook
+ (vrutkovs@redhat.com)
+- Fix uninstall using openshift_prometheus_state=absent (zgalor@redhat.com)
+- Detect config changes in console liveness probe (spadgett@redhat.com)
+- Fix master and node system container variables (mgugino@redhat.com)
+- Correct the list of certificates checked in openshift_master_certificates
+ s.t. masters do not incorrectly report that master certs are missing.
+ (abutcher@redhat.com)
+- tag fix without ose- (rcook@redhat.com)
+- lib_utils_oo_collect: Allow filtering on dot separated keys.
+ (abutcher@redhat.com)
+- Determine which etcd host is the etcd_ca_host rather than assume it is the
+ first host in the etcd host group. (abutcher@redhat.com)
+- Attempt to back up generated certificates on every etcd host.
+ (abutcher@redhat.com)
+- Remove pre upgrade verification step re: etcd ca host. (abutcher@redhat.com)
+- Revert "GlusterFS: Remove image option from heketi command" (hansmi@vshn.ch)
+
* Wed Feb 07 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.41.0
- Allow OVS 2.7 in OCP 3.10 (sdodson@redhat.com)
- GlusterFS: Minor documentation update (jarrpa@redhat.com)
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index fafbd8d1c..86cde2844 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -134,7 +134,7 @@
# Run the redeploy certs based upon the certificates. Defaults to False for insecure registries
- when: (hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry | default(False)) | bool
- import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml
+ import_playbook: ../../../openshift-hosted/private/redeploy-registry-certificates.yml
# Check for warnings to be printed at the end of the upgrade:
- name: Clean up and display warnings
diff --git a/playbooks/container-runtime/config.yml b/playbooks/container-runtime/config.yml
index f15aa771f..d7f3634ec 100644
--- a/playbooks/container-runtime/config.yml
+++ b/playbooks/container-runtime/config.yml
@@ -1,6 +1,8 @@
---
- import_playbook: ../init/main.yml
vars:
- skip_verison: True
+ skip_version: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
- import_playbook: private/config.yml
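
The `"all:!all"` value used here (and in the playbook changes that follow) for `l_openshift_version_set_hosts` and `l_openshift_version_check_hosts` is an Ansible host pattern that selects all hosts and then excludes all of them, so the plays that consume these variables run against an empty host list. A minimal sketch of the mechanism, assuming the variables are used as the `hosts:` pattern of the version plays (the consuming plays in `playbooks/init/version.yml` are not shown in this diff, and the play name and default below are invented):
```
# Illustrative only: a play whose target list is driven by a variable.
# Passing "all:!all" (all hosts minus all hosts) resolves to no hosts,
# so the play is effectively skipped without any extra conditionals.
- name: Example version-detection play (hypothetical)
  hosts: "{{ l_openshift_version_set_hosts | default('oo_first_master') }}"
  gather_facts: false
  tasks:
    - debug:
        msg: "Never reached when l_openshift_version_set_hosts is 'all:!all'"
```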
diff --git a/playbooks/container-runtime/setup_storage.yml b/playbooks/container-runtime/setup_storage.yml
index 98e876b2c..17ff11cfd 100644
--- a/playbooks/container-runtime/setup_storage.yml
+++ b/playbooks/container-runtime/setup_storage.yml
@@ -1,6 +1,8 @@
---
- import_playbook: ../init/main.yml
vars:
- skip_verison: True
+ skip_version: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
- import_playbook: private/setup_storage.yml
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 9886691e0..468d81fbe 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -1,5 +1,5 @@
---
-# skip_verison and l_install_base_packages are passed in via prerequistes.yml.
+# skip_version and l_install_base_packages are passed in via prerequistes.yml.
# skip_sanity_checks is passed in via openshift-node/private/image_prep.yml
- name: Initialization Checkpoint Start
@@ -27,7 +27,7 @@
- import_playbook: cluster_facts.yml
- import_playbook: version.yml
- when: not (skip_verison | default(False))
+ when: not (skip_version | default(False))
- import_playbook: sanity_checks.yml
when: not (skip_sanity_checks | default(False))
diff --git a/playbooks/openshift-etcd/certificates.yml b/playbooks/openshift-etcd/certificates.yml
index c06e3b575..86caba4e8 100644
--- a/playbooks/openshift-etcd/certificates.yml
+++ b/playbooks/openshift-etcd/certificates.yml
@@ -1,5 +1,11 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/ca.yml
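
Here `l_init_fact_hosts` narrows initial fact collection to the master, etcd and load-balancer groups, while `l_sanity_check_hosts` builds the host list for sanity checks by combining groups with Jinja2's `union` filter, which deduplicates overlapping membership. A small, hypothetical illustration of what that expression yields:
```
# Hypothetical group membership, for illustration only:
#   groups['oo_etcd_to_config']:    [etcd1, master1]
#   groups['oo_masters_to_config']: [master1, master2]
#
# "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
# then evaluates to the deduplicated list:
#   [etcd1, master1, master2]
```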
diff --git a/playbooks/openshift-etcd/config.yml b/playbooks/openshift-etcd/config.yml
index c7814207c..378edce85 100644
--- a/playbooks/openshift-etcd/config.yml
+++ b/playbooks/openshift-etcd/config.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-etcd/embedded2external.yml b/playbooks/openshift-etcd/embedded2external.yml
index 7d090fa9b..34be38ac0 100644
--- a/playbooks/openshift-etcd/embedded2external.yml
+++ b/playbooks/openshift-etcd/embedded2external.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/embedded2external.yml
diff --git a/playbooks/openshift-etcd/migrate.yml b/playbooks/openshift-etcd/migrate.yml
index 0340b74a5..4e8238ebd 100644
--- a/playbooks/openshift-etcd/migrate.yml
+++ b/playbooks/openshift-etcd/migrate.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/migrate.yml
diff --git a/playbooks/openshift-etcd/redeploy-ca.yml b/playbooks/openshift-etcd/redeploy-ca.yml
index 769d694ba..93b68a257 100644
--- a/playbooks/openshift-etcd/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/redeploy-ca.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/redeploy-ca.yml
diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml
index 8ea1994f7..202acb493 100644
--- a/playbooks/openshift-etcd/redeploy-certificates.yml
+++ b/playbooks/openshift-etcd/redeploy-certificates.yml
@@ -1,5 +1,11 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/redeploy-certificates.yml
diff --git a/playbooks/openshift-etcd/restart.yml b/playbooks/openshift-etcd/restart.yml
index 041c1384d..05aaa9809 100644
--- a/playbooks/openshift-etcd/restart.yml
+++ b/playbooks/openshift-etcd/restart.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 1f8cb7391..3e2fca8d4 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -43,8 +43,10 @@
# prerequisites, we can just init facts as normal.
- import_playbook: ../init/main.yml
vars:
- skip_verison: True
+ skip_version: True
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
when:
- inventory_hostname in groups['oo_masters']
- inventory_hostname in groups['oo_nodes_to_config']
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
index 77999d92c..1edcd6819 100644
--- a/playbooks/openshift-etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -1,7 +1,9 @@
---
- import_playbook: ../init/main.yml
vars:
- skip_verison: True
+ skip_version: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
diff --git a/playbooks/openshift-glusterfs/config.yml b/playbooks/openshift-glusterfs/config.yml
index c7814207c..ccdd8d069 100644
--- a/playbooks/openshift-glusterfs/config.yml
+++ b/playbooks/openshift-glusterfs/config.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config:oo_glusterfs_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_glusterfs_to_config']) }}"
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-glusterfs/registry.yml b/playbooks/openshift-glusterfs/registry.yml
index 5e3b18536..cc2846cb3 100644
--- a/playbooks/openshift-glusterfs/registry.yml
+++ b/playbooks/openshift-glusterfs/registry.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config:oo_glusterfs_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_glusterfs_to_config']) }}"
- import_playbook: private/registry.yml
diff --git a/playbooks/openshift-grafana/config.yml b/playbooks/openshift-grafana/config.yml
index c7814207c..62d954d29 100644
--- a/playbooks/openshift-grafana/config.yml
+++ b/playbooks/openshift-grafana/config.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-hosted/config.yml b/playbooks/openshift-hosted/config.yml
index c7814207c..62d954d29 100644
--- a/playbooks/openshift-hosted/config.yml
+++ b/playbooks/openshift-hosted/config.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-hosted/deploy_registry.yml b/playbooks/openshift-hosted/deploy_registry.yml
index 2453329dd..e42af7149 100644
--- a/playbooks/openshift-hosted/deploy_registry.yml
+++ b/playbooks/openshift-hosted/deploy_registry.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
- import_playbook: private/openshift_hosted_registry.yml
diff --git a/playbooks/openshift-hosted/deploy_router.yml b/playbooks/openshift-hosted/deploy_router.yml
index e832eeeea..a3564fe51 100644
--- a/playbooks/openshift-hosted/deploy_router.yml
+++ b/playbooks/openshift-hosted/deploy_router.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
- import_playbook: private/openshift_hosted_router.yml
diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
index 518a1d624..1ab237558 100644
--- a/playbooks/openshift-hosted/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
- import_playbook: private/redeploy-registry-certificates.yml
diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml
index a74dd8c79..4b44be405 100644
--- a/playbooks/openshift-hosted/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
- import_playbook: private/redeploy-router-certificates.yml
diff --git a/playbooks/openshift-loadbalancer/config.yml b/playbooks/openshift-loadbalancer/config.yml
index c7814207c..13903ee17 100644
--- a/playbooks/openshift-loadbalancer/config.yml
+++ b/playbooks/openshift-loadbalancer/config.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config:oo_lb_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_lb_to_config']) }}"
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml
index 83d330284..419dcbc3f 100644
--- a/playbooks/openshift-logging/config.yml
+++ b/playbooks/openshift-logging/config.yml
@@ -5,5 +5,10 @@
# currently supported method.
#
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-management/config.yml b/playbooks/openshift-management/config.yml
index c7814207c..62d954d29 100644
--- a/playbooks/openshift-management/config.yml
+++ b/playbooks/openshift-management/config.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-metrics/config.yml b/playbooks/openshift-metrics/config.yml
index c7814207c..1ca68fb9e 100644
--- a/playbooks/openshift-metrics/config.yml
+++ b/playbooks/openshift-metrics/config.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
+
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-nfs/config.yml b/playbooks/openshift-nfs/config.yml
index c7814207c..b22796228 100644
--- a/playbooks/openshift-nfs/config.yml
+++ b/playbooks/openshift-nfs/config.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config:oo_nfs_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_nfs_to_config']) }}"
+
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-prometheus/config.yml b/playbooks/openshift-prometheus/config.yml
index c7814207c..1ca68fb9e 100644
--- a/playbooks/openshift-prometheus/config.yml
+++ b/playbooks/openshift-prometheus/config.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
+
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-provisioners/config.yml b/playbooks/openshift-provisioners/config.yml
index c7814207c..1ca68fb9e 100644
--- a/playbooks/openshift-provisioners/config.yml
+++ b/playbooks/openshift-provisioners/config.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
+
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-service-catalog/config.yml b/playbooks/openshift-service-catalog/config.yml
index c7814207c..1ca68fb9e 100644
--- a/playbooks/openshift-service-catalog/config.yml
+++ b/playbooks/openshift-service-catalog/config.yml
@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
+
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-web-console/config.yml b/playbooks/openshift-web-console/config.yml
index c7814207c..62d954d29 100644
--- a/playbooks/openshift-web-console/config.yml
+++ b/playbooks/openshift-web-console/config.yml
@@ -1,4 +1,9 @@
---
- import_playbook: ../init/main.yml
+ vars:
+ l_init_fact_hosts: "oo_masters_to_config"
+ l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "all:!all"
+ l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
- import_playbook: private/config.yml
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index e8f4cfc32..8df3c40b0 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -273,6 +273,38 @@ openshift_openstack_cluster_node_labels:
mylabel: myvalue
```
+`openshift_openstack_provision_user_commands` allows users to execute
+shell commands via cloud-init for all of the created Nova servers in
+the Heat stack, before they are available for SSH connections.
+Note that you should use custom Ansible playbooks whenever
+possible, like this `provision_install_custom.yml` example playbook:
+```
+- import_playbook: openshift-ansible/playbooks/openstack/openshift-cluster/provision.yml
+
+- name: My custom actions
+ hosts: cluster_hosts
+ tasks:
+ - do whatever you want here
+
+- import_playbook: openshift-ansible/playbooks/openstack/openshift-cluster/install.yml
+```
+The playbook leverages two existing provider interfaces: `provision.yml` and
+`install.yml`. For some cases, such as SSH key configuration and coordinated reboots of
+servers, the cloud-init runcmd directive may be the better choice. User-specified
+shell commands for cloud-init need to be either strings or lists, for example:
+```
+- openshift_openstack_provision_user_commands:
+ - set -vx
+ - systemctl stop sshd # fences off ansible playbooks as we want to reboot later
+ - ['echo', 'foo', '>', '/tmp/foo']
+ - [ ls, /tmp/foo, '||', true ]
+ - reboot # unfences ansible playbooks to continue after reboot
+```
+
+**Note** To protect Nova servers from being recreated when the user-data changes via
+`openshift_openstack_provision_user_commands`, the
+`user_data_update_policy` parameter is set to `IGNORE` for Heat resources.
+
The `openshift_openstack_nodes_to_remove` allows you to specify the numerical indexes
of App nodes that should be removed; for example, ['0', '2'],
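
The `user_data_update_policy: IGNORE` behaviour described in the note above presumably lives in the Heat template (`roles/openshift_openstack/templates/heat_stack.yaml.j2` is among the files changed by this commit, though its hunk is not shown in this excerpt). A hedged sketch of the idea, with invented resource names, image and flavor rather than the project's actual template:
```
# Illustrative Heat resource only; names and values are invented.
# With user_data_update_policy set to IGNORE, later changes to user_data
# (e.g. editing openshift_openstack_provision_user_commands) do not force
# the Nova server to be replaced on a stack update.
resources:
  example_node:
    type: OS::Nova::Server
    properties:
      name: example-node
      image: centos-7
      flavor: m1.medium
      user_data_format: RAW
      user_data_update_policy: IGNORE
      user_data: |
        #cloud-config
        runcmd:
          - systemctl stop sshd
          - reboot
```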
diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml
index d63229120..101ac52ad 100644
--- a/playbooks/openstack/sample-inventory/group_vars/all.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/all.yml
@@ -85,7 +85,12 @@ openshift_openstack_docker_volume_size: "15"
## WARNING: This will delete any data on the volume!
#openshift_openstack_prepare_and_format_registry_volume: False
-openshift_openstack_subnet_prefix: "192.168.99"
+# The Classless Inter-Domain Routing (CIDR) for the OpenStack VM subnet.
+openshift_openstack_subnet_cidr: "192.168.99.0/24"
+# The starting IP address for the OpenStack subnet allocation pool.
+openshift_openstack_pool_start: "192.168.99.3"
+# The ending IP address for the OpenStack subnet allocation pool.
+openshift_openstack_pool_end: "192.168.99.254"
## Red Hat subscription:
#rhsub_user: '<username>'
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 0b76ca862..544adbd4d 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -3,8 +3,10 @@
- import_playbook: init/main.yml
vars:
- skip_verison: True
+ skip_version: True
l_install_base_packages: True
+ l_openshift_version_set_hosts: "all:!all"
+ l_openshift_version_check_hosts: "all:!all"
- import_playbook: init/validate_hostnames.yml
when: not (skip_validate_hostnames | default(False))
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index c203f596e..15a7e5477 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -1,7 +1,7 @@
---
__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
-__ansible_service_broker_image_tag: v3.7
+__ansible_service_broker_image_tag: "{{ openshift_image_tag }}"
__ansible_service_broker_etcd_image_prefix: registry.access.redhat.com/rhel7/
__ansible_service_broker_etcd_image_tag: latest
@@ -14,6 +14,6 @@ __ansible_service_broker_registry_url: "https://registry.access.redhat.com"
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
__ansible_service_broker_registry_organization: null
-__ansible_service_broker_registry_tag: v3.7
+__ansible_service_broker_registry_tag: "{{ openshift_image_tag }}"
__ansible_service_broker_registry_whitelist:
- '.*-apb$'
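
This enterprise vars file (and the similar ones for logging, metrics, the web console and the service catalog below) stops pinning image tags to `v3.7` and instead falls back to `openshift_image_tag`, so component images follow the version of the cluster being deployed. A hypothetical group_vars sketch of how the values resolve (the version numbers are invented for illustration):
```
# Hypothetical values: when openshift_image_tag is v3.9.0 and no per-component
# version is set, the broker and logging image tags above all resolve to v3.9.0.
openshift_image_tag: v3.9.0
# A per-component override, such as openshift_logging_image_version, is still
# honoured if explicitly set:
# openshift_logging_image_version: v3.9.0
```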
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index 7397e2bec..01540776f 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -94,6 +94,17 @@ l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
+# this is a list of dictionaries of mounts
+# container_runtime_crio_additional_mounts:
+# - destination: /test
+# source: /var/test
+# options:
+# - rw
+# - mode=755
+# type: bind
+container_runtime_crio_additional_mounts: []
+
+l_crio_additional_mounts: "{{ ',' + (container_runtime_crio_additional_mounts | lib_utils_oo_l_of_d_to_csv) if container_runtime_crio_additional_mounts != [] else '' }}"
openshift_crio_image_tag_default: "latest"
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index d588f2618..f053bdea5 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -53,6 +53,8 @@
name: "cri-o"
image: "{{ l_crio_image }}"
state: latest
+ values:
+ - "ADDTL_MOUNTS={{ l_crio_additional_mounts }}"
- name: Remove CRI-O default configuration files
file:
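
Taken together, the two container-runtime hunks let an inventory define extra bind mounts for the CRI-O system container: the list of dictionaries is flattened by `lib_utils_oo_l_of_d_to_csv` into `l_crio_additional_mounts`, which is then handed to the system container as the `ADDTL_MOUNTS` value above. A hypothetical group_vars entry, mirroring the commented example in the role defaults:
```
# Hypothetical entry; destination, source and options are for illustration only.
container_runtime_crio_additional_mounts:
  - destination: /test
    source: /var/test
    type: bind
    options:
      - rw
      - mode=755
```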
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 87e249642..6f1dc5847 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -78,7 +78,7 @@ etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_p
# required role variable
#etcd_peer: 127.0.0.1
-etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
+etcdctlv2: "{{ r_etcd_common_etcdctl_command }} --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
# Location of the service file is fixed and not meant to be changed
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
index 1cc6d2375..4f9dd82de 100644
--- a/roles/kuryr/tasks/master.yaml
+++ b/roles/kuryr/tasks/master.yaml
@@ -1,6 +1,7 @@
---
- name: Perform OpenShift ServiceAccount config
include_tasks: serviceaccount.yaml
+ run_once: true
- name: Create kuryr manifests tempdir
command: mktemp -d
@@ -32,6 +33,7 @@
namespace: "{{ kuryr_namespace }}"
files:
- "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+ run_once: true
- name: Apply Controller Deployment manifest
oc_obj:
@@ -41,6 +43,7 @@
namespace: "{{ kuryr_namespace }}"
files:
- "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+ run_once: true
- name: Apply kuryr-cni DaemonSet manifest
oc_obj:
@@ -50,3 +53,4 @@
namespace: "{{ kuryr_namespace }}"
files:
- "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
+ run_once: true
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
index c355115b5..ed6bb4c28 100644
--- a/roles/lib_utils/filter_plugins/oo_filters.py
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -660,6 +660,50 @@ def map_from_pairs(source, delim="="):
return dict(item.split(delim) for item in source.split(","))
+def lib_utils_oo_get_node_labels(source, hostvars=None):
+ ''' Return a list of labels assigned to schedulable nodes '''
+ labels = list()
+
+ # Filter out the unschedulable nodes
+ for host in source:
+ if host not in hostvars:
+ return
+ node_vars = hostvars[host]
+
+ # All nodes are considered schedulable,
+ # unless explicitly marked so
+ schedulable = node_vars.get('openshift_schedulable')
+ if schedulable is None:
+ schedulable = True
+ try:
+ if not strtobool(str(schedulable)):
+ # explicitly marked as unschedulable
+ continue
+ except ValueError:
+ # Incorrect value in openshift_schedulable, skip node
+ continue
+
+ # Get a list of labels from the node
+ node_labels = node_vars.get('openshift_node_labels')
+ if node_labels:
+ labels.append(node_labels)
+
+ return labels
+
+
+def lib_utils_oo_has_no_matching_selector(source, selector=None):
+ ''' Return True when selector cannot be placed
+ on nodes with labels from source '''
+ # Empty selector means any node
+ if not selector:
+ return False
+ for item in source:
+ if selector.items() <= item.items():
+ # Matching selector found
+ return False
+ return True
+
+
class FilterModule(object):
""" Custom ansible filter mapping """
@@ -691,5 +735,7 @@ class FilterModule(object):
"lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
"lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
"lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv,
- "map_from_pairs": map_from_pairs
+ "lib_utils_oo_has_no_matching_selector": lib_utils_oo_has_no_matching_selector,
+ "lib_utils_oo_get_node_labels": lib_utils_oo_get_node_labels,
+ "map_from_pairs": map_from_pairs,
}
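
The two new filters are wired together elsewhere in this commit: `roles/openshift_facts/defaults/main.yml` (below) defines `openshift_schedulable_node_labels` via `lib_utils_oo_get_node_labels`, and the logging and metrics roles use `lib_utils_oo_has_no_matching_selector` to fail early when a node selector cannot be satisfied. A condensed sketch of that usage pattern (the selector value is invented):
```
# Hedged sketch combining the two filters, mirroring the checks added to the
# logging roles later in this diff; "region: infra" is an invented selector.
- name: Fail when no schedulable node matches the component node selector
  fail:
    msg: "No schedulable nodes found matching node selector - {{ example_nodeselector }}"
  vars:
    example_nodeselector:
      region: infra
  when:
    - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(example_nodeselector)
```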
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index bda1334cd..46e4e1cc5 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -20,6 +20,9 @@ runcmd:
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
{% endif %}
{% if openshift_aws_node_group.group != 'master' %}
+{# Restarting systemd-hostnamed ensures that instances will have FQDN
+hostnames following network restart. #}
+- [ systemctl, restart, systemd-hostnamed]
- [ systemctl, restart, NetworkManager]
- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index a223ffba6..3b381b1e4 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -104,3 +104,6 @@ openshift_service_type_dict:
openshift-enterprise: atomic-openshift
openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
+
+# Create a list of node labels (dict) for schedulable nodes
+openshift_schedulable_node_labels: "{{ groups['oo_nodes_to_config'] | lib_utils_oo_get_node_labels(hostvars) }}"
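
Given the filter definitions above, this fact evaluates to a list of label dictionaries, one per schedulable node. A hypothetical example of the data it produces:
```
# Hypothetical inventory facts, for illustration only:
#   node1: openshift_schedulable: true,  openshift_node_labels: {region: infra}
#   node2: openshift_schedulable: false, openshift_node_labels: {region: primary}
# With those values, openshift_schedulable_node_labels evaluates to:
#   [{'region': 'infra'}]
# node2 is skipped because it is explicitly marked unschedulable.
```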
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 6e8605d28..456a25082 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -14,6 +14,13 @@
- include_tasks: determine_version.yaml
+- name: Ensure that logging curator has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for logging curator - '{{ openshift_logging_curator_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_curator_nodeselector)
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
diff --git a/roles/openshift_logging_curator/vars/default_images.yml b/roles/openshift_logging_curator/vars/default_images.yml
index 208b41afa..503be9b58 100644
--- a/roles/openshift_logging_curator/vars/default_images.yml
+++ b/roles/openshift_logging_curator/vars/default_images.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix | default('docker.io/openshift/origin-') }}"
-__openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default('latest') }}"
+__openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default(openshift_image_tag) }}"
diff --git a/roles/openshift_logging_curator/vars/openshift-enterprise.yml b/roles/openshift_logging_curator/vars/openshift-enterprise.yml
index 79cf131fd..e0507fe3c 100644
--- a/roles/openshift_logging_curator/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging_curator/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 9db67ea9b..64e5a3a1f 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that ElasticSearch has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Elasticsearch - '{{ openshift_logging_es_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_es_nodeselector)
+
- name: Validate Elasticsearch cluster size
fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
@@ -18,8 +25,8 @@
- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -35,14 +42,14 @@
- set_fact:
full_restart_cluster: True
when:
- - _es_installed_version is defined
- - _es_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+ - _es_installed_version is defined
+ - _es_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
- set_fact:
full_restart_cluster: True
when:
- - _es_ops_installed_version is defined
- - _es_ops_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+ - _es_ops_installed_version is defined
+ - _es_ops_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
# allow passing in a tempdir
- name: Create temp directory for doing work in
@@ -78,7 +85,7 @@
name: "aggregated-logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
when:
- - openshift_logging_image_pull_secret == ''
+ - openshift_logging_image_pull_secret == ''
# rolebinding reader
- name: Create rolebinding-reader role
@@ -86,9 +93,9 @@
state: present
name: rolebinding-reader
rules:
- - apiGroups: [""]
- resources: ["clusterrolebindings"]
- verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["clusterrolebindings"]
+ verbs: ["get"]
# SA roles
- name: Set rolebinding-reader permissions for ES
@@ -128,8 +135,8 @@
- fail:
msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}"
when:
- - "prometheus_out.stderr | length > 0"
- - "'already exists' not in prometheus_out.stderr"
+ - "prometheus_out.stderr | length > 0"
+ - "'already exists' not in prometheus_out.stderr"
- set_fact:
_logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
@@ -151,8 +158,8 @@
roleRef:
name: view
subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
changed_when: no
- name: Set logging-elasticsearch-view-role role
@@ -162,18 +169,18 @@
kind: rolebinding
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
delete_after: true
# configmap
- assert:
that:
- - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
+ - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}"
- assert:
that:
- - "{{ openshift_logging_es_log_appenders | length > 0 }}"
+ - "{{ openshift_logging_es_log_appenders | length > 0 }}"
msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}"
- template:
@@ -189,81 +196,81 @@
# create diff between current configmap files and our current files
- when: not openshift_logging_es5_techpreview
block:
- - template:
- src: "{{ __base_file_dir }}/elasticsearch-logging.yml.j2"
- dest: "{{ tempdir }}/elasticsearch-logging.yml"
- vars:
- root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
- changed_when: no
-
- - include_role:
- name: openshift_logging
- tasks_from: patch_configmap_files.yaml
- vars:
- configmap_name: "logging-elasticsearch"
- configmap_namespace: "logging"
- configmap_file_names:
- - current_file: "elasticsearch.yml"
- new_file: "{{ tempdir }}/elasticsearch.yml"
- protected_lines: ["number_of_shards", "number_of_replicas"]
- - current_file: "logging.yml"
- new_file: "{{ tempdir }}/elasticsearch-logging.yml"
-
- - name: Set ES configmap
- oc_configmap:
- state: present
- name: "{{ elasticsearch_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- from_file:
- elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
- logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
- register: es_config_creation
- notify: "restart elasticsearch"
+ - template:
+ src: "{{ __base_file_dir }}/elasticsearch-logging.yml.j2"
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ vars:
+ root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
+ changed_when: no
+
+ - include_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-elasticsearch"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "elasticsearch.yml"
+ new_file: "{{ tempdir }}/elasticsearch.yml"
+ protected_lines: ["number_of_shards", "number_of_replicas"]
+ - current_file: "logging.yml"
+ new_file: "{{ tempdir }}/elasticsearch-logging.yml"
+
+ - name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+ register: es_config_creation
+ notify: "restart elasticsearch"
- when: openshift_logging_es5_techpreview | bool
block:
- - template:
- src: "{{ __base_file_dir }}/log4j2.properties.j2"
- dest: "{{ tempdir }}/log4j2.properties"
- vars:
- root_logger: "{{ openshift_logging_es_log_appenders | list }}"
- changed_when: no
-
- - include_role:
- name: openshift_logging
- tasks_from: patch_configmap_files.yaml
- vars:
- configmap_name: "logging-elasticsearch"
- configmap_namespace: "logging"
- configmap_file_names:
- - current_file: "elasticsearch.yml"
- new_file: "{{ tempdir }}/elasticsearch.yml"
- - current_file: "log4j2.properties"
- new_file: "{{ tempdir }}/log4j2.properties"
-
- - name: Set ES configmap
- oc_configmap:
- state: present
- name: "{{ elasticsearch_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- from_file:
- elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
- log4j2.properties: "{{ tempdir }}/log4j2.properties"
- register: es_config_creation
- notify: "restart elasticsearch"
+ - template:
+ src: "{{ __base_file_dir }}/log4j2.properties.j2"
+ dest: "{{ tempdir }}/log4j2.properties"
+ vars:
+ root_logger: "{{ openshift_logging_es_log_appenders | list }}"
+ changed_when: no
+
+ - include_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-elasticsearch"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "elasticsearch.yml"
+ new_file: "{{ tempdir }}/elasticsearch.yml"
+ - current_file: "log4j2.properties"
+ new_file: "{{ tempdir }}/log4j2.properties"
+
+ - name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ log4j2.properties: "{{ tempdir }}/log4j2.properties"
+ register: es_config_creation
+ notify: "restart elasticsearch"
- when: es_config_creation.changed | bool
block:
- - set_fact:
- _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
+ - set_fact:
+ _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
- - shell: >
- {{ openshift_client_binary }} get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
- register: _es_dcs
+ - shell: >
+ {{ openshift_client_binary }} get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
+ register: _es_dcs
- - set_fact:
- _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [_es_dcs.stdout] | unique }}"
- when: _es_dcs.stdout != ""
+ - set_fact:
+ _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [_es_dcs.stdout] | unique }}"
+ when: _es_dcs.stdout != ""
# secret
- name: Set ES secret
@@ -272,24 +279,24 @@
name: "logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - name: key
- path: "{{ generated_certs_dir }}/logging-es.jks"
- - name: truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: searchguard.key
- path: "{{ generated_certs_dir }}/elasticsearch.jks"
- - name: searchguard.truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: admin-key
- path: "{{ generated_certs_dir }}/system.admin.key"
- - name: admin-cert
- path: "{{ generated_certs_dir }}/system.admin.crt"
- - name: admin-ca
- path: "{{ generated_certs_dir }}/ca.crt"
- - name: admin.jks
- path: "{{ generated_certs_dir }}/system.admin.jks"
- - name: passwd.yml
- path: "{{mktemp.stdout}}/passwd.yml"
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: passwd.yml
+ path: "{{mktemp.stdout}}/passwd.yml"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -303,7 +310,7 @@
labels:
logging-infra: 'support'
ports:
- - port: 9300
+ - port: 9300
- name: Set logging-{{ es_component }} service
oc_service:
@@ -316,8 +323,8 @@
labels:
logging-infra: 'support'
ports:
- - port: 9200
- targetPort: "restapi"
+ - port: 9200
+ targetPort: "restapi"
- name: Set logging-{{ es_component}}-prometheus service
oc_service:
@@ -327,9 +334,9 @@
labels:
logging-infra: 'support'
ports:
- - name: proxy
- port: 443
- targetPort: 4443
+ - name: proxy
+ port: 443
+ targetPort: 4443
selector:
component: "{{ es_component }}"
provider: openshift
@@ -357,46 +364,46 @@
# so we check for the presence of 'stderr' to determine if the obj exists or not
# the RC for existing and not existing is both 0
- when:
- - logging_elasticsearch_pvc.results.stderr is defined
- - openshift_logging_elasticsearch_storage_type == "pvc"
+ - logging_elasticsearch_pvc.results.stderr is defined
+ - openshift_logging_elasticsearch_storage_type == "pvc"
block:
- # storageclasses are used by default but if static then disable
- # storageclasses with the storageClassName set to "" in pvc.j2
- - name: Creating ES storage template - static
- template:
- src: "{{ __base_file_dir }}/pvc.j2"
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
- when:
- - not openshift_logging_elasticsearch_pvc_dynamic | bool
-
- # Storageclasses are used by default if configured
- - name: Creating ES storage template - dynamic
- template:
- src: "{{ __base_file_dir }}/pvc.j2"
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- when:
- - openshift_logging_elasticsearch_pvc_dynamic | bool
-
- - name: Set ES storage
- oc_obj:
- state: present
- kind: pvc
- name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- files:
- - "{{ tempdir }}/templates/logging-es-pvc.yml"
- delete_after: true
+ # storageclasses are used by default but if static then disable
+ # storageclasses with the storageClassName set to "" in pvc.j2
+ - name: Creating ES storage template - static
+ template:
+ src: "{{ __base_file_dir }}/pvc.j2"
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
+ when:
+ - not openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ # Storageclasses are used by default if configured
+ - name: Creating ES storage template - dynamic
+ template:
+ src: "{{ __base_file_dir }}/pvc.j2"
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ - name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
- set_fact:
es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | lib_utils_oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
@@ -437,7 +444,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: dc
files:
- - "{{ tempdir }}/templates/logging-es-dc.yml"
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
delete_after: true
register: es_dc_creation
notify: "restart elasticsearch"
@@ -452,37 +459,37 @@
src: "{{ generated_certs_dir }}/{{ item.file }}"
register: key_pairs
with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "es_key", file: "system.logging.es.key" }
- - { name: "es_cert", file: "system.logging.es.crt" }
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "es_key", file: "system.logging.es.key" }
+ - { name: "es_cert", file: "system.logging.es.crt" }
when: openshift_logging_es_allow_external | bool
- set_fact:
es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}"
when:
- - openshift_logging_es_key | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_key | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}"
when:
- - openshift_logging_es_cert | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_cert | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}"
when:
- - openshift_logging_es_ca_ext | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_ca_ext | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
when:
- - es_ca is not defined
- - openshift_logging_es_allow_external | bool
+ - es_ca is not defined
+ - openshift_logging_es_allow_external | bool
changed_when: false
- name: Generating Elasticsearch {{ es_component }} route template
@@ -513,7 +520,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: route
files:
- - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
+ - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
when: openshift_logging_es_allow_external | bool
## Placeholder for migration when necessary ##
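
The two "Creating ES storage template" tasks above differ only in whether a storage class is handed to pvc.j2: the static variant pins storageClassName to the empty string so the claim binds only to pre-provisioned PVs, while the dynamic variant omits the field so the cluster's default StorageClass provisions the volume. A rough Python sketch of the resulting claims, assuming a conventional PVC layout (pvc.j2 itself is not part of this diff, and the names and sizes below are sample values):

# Illustration only: approximates what pvc.j2 renders for the static and dynamic tasks.
def build_pvc(name, size, access_modes, pv_selector, storage_class_name=None):
    """Build a PersistentVolumeClaim dict; storage_class_name=None omits the field
    (dynamic provisioning), while "" disables it so only static PVs can bind."""
    spec = {
        "accessModes": list(access_modes),
        "resources": {"requests": {"storage": size}},
    }
    if pv_selector:
        spec["selector"] = {"matchLabels": dict(pv_selector)}
    if storage_class_name is not None:
        spec["storageClassName"] = storage_class_name
    return {
        "apiVersion": "v1",
        "kind": "PersistentVolumeClaim",
        "metadata": {"name": name},
        "spec": spec,
    }

# Static path: storageClassName "" -> bind to existing PVs only.
static_pvc = build_pvc("logging-es-0", "10Gi", ["ReadWriteOnce"], {}, "")
# Dynamic path: field omitted -> the default StorageClass provisions the volume.
dynamic_pvc = build_pvc("logging-es-0", "10Gi", ["ReadWriteOnce"], {})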
diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
index 07d92896f..0cf48a66b 100644
--- a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
@@ -1,5 +1,5 @@
---
__openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"
__openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"
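
This default() change repeats across the logging, metrics, service catalog, web console, and Prometheus roles below: an explicitly set image version variable still wins, but the fallback now tracks the cluster-wide openshift_image_tag instead of a hard-coded 'v3.7' or 'latest'. A small sketch of how the filter chain resolves, using the jinja2 package that Ansible builds on (the version strings are sample values):

from jinja2 import Template

# Same expression as in the vars file above, minus the surrounding quotes.
tmpl = Template("{{ openshift_logging_image_version | default(openshift_image_tag) }}")

# An explicit logging image version still takes precedence ...
print(tmpl.render(openshift_logging_image_version="v3.9.1", openshift_image_tag="v3.9.0"))  # v3.9.1
# ... otherwise the component follows the cluster image tag.
print(tmpl.render(openshift_image_tag="v3.9.0"))  # v3.9.0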
diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
index fffdd9f8b..2adc51a16 100644
--- a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
@@ -4,6 +4,13 @@
msg: Invalid sink type "{{openshift_logging_eventrouter_sink}}", only one of "{{__eventrouter_sinks}}" allowed
that: openshift_logging_eventrouter_sink in __eventrouter_sinks
+- name: Ensure that logging eventrouter has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for logging EventRouter - '{{ openshift_logging_eventrouter_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_eventrouter_nodeselector)
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
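
This precheck pattern, repeated below for Kibana, mux, the metrics components, Prometheus, the provisioners, and the template service broker, fails the play early when no schedulable node carries the labels the node selector asks for. A rough sketch of the matching the filter is expected to perform, assuming openshift_schedulable_node_labels is a list of per-node label dicts (the real filter lives in roles/lib_utils/filter_plugins/oo_filters.py and may differ in detail):

# Illustration only: approximate semantics of lib_utils_oo_has_no_matching_selector.
def has_no_matching_selector(node_labels, selector):
    """Return True when no node's labels satisfy every key/value pair in
    selector; an empty selector is treated as matching any node."""
    if not selector:
        return False
    return not any(
        all(labels.get(key) == value for key, value in selector.items())
        for labels in node_labels
    )

schedulable = [
    {"region": "infra", "kubernetes.io/hostname": "node-1"},
    {"region": "primary", "kubernetes.io/hostname": "node-2"},
]
print(has_no_matching_selector(schedulable, {"region": "infra"}))    # False: node-1 matches
print(has_no_matching_selector(schedulable, {"region": "logging"}))  # True: the fail task fires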
diff --git a/roles/openshift_logging_eventrouter/vars/openshift-enterprise.yml b/roles/openshift_logging_eventrouter/vars/openshift-enterprise.yml
index bb7dc6455..1a6bc208e 100644
--- a/roles/openshift_logging_eventrouter/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging_eventrouter/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"
diff --git a/roles/openshift_logging_fluentd/vars/openshift-enterprise.yml b/roles/openshift_logging_fluentd/vars/openshift-enterprise.yml
index d0c74f1fb..b62b62a74 100644
--- a/roles/openshift_logging_fluentd/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging_fluentd/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 58edc5ce5..7b6bc02e1 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -8,6 +8,13 @@
loop_control:
loop_var: var_file_name
+- name: Ensure that Kibana has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Kibana - '{{ openshift_logging_kibana_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_kibana_nodeselector)
+
- name: Set kibana image facts
set_fact:
openshift_logging_kibana_image_prefix: "{{ openshift_logging_kibana_image_prefix | default(__openshift_logging_kibana_image_prefix) }}"
diff --git a/roles/openshift_logging_kibana/vars/openshift-enterprise.yml b/roles/openshift_logging_kibana/vars/openshift-enterprise.yml
index 0be2e7252..033c8ae5f 100644
--- a/roles/openshift_logging_kibana/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging_kibana/vars/openshift-enterprise.yml
@@ -1,5 +1,5 @@
---
__openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_kibana_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_kibana_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"
__openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index b2699b285..f810f3606 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -7,11 +7,18 @@
msg: Operations logs destination is required
when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == ''
+- name: Ensure that logging mux has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for logging mux - '{{ openshift_logging_mux_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_mux_nodeselector)
+
- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -55,7 +62,7 @@
name: "aggregated-logging-mux"
namespace: "{{ openshift_logging_mux_namespace }}"
when:
- - openshift_logging_image_pull_secret == ''
+ - openshift_logging_image_pull_secret == ''
# set service account scc
- name: Set privileged permissions for Mux
@@ -102,10 +109,10 @@
configmap_name: "logging-mux"
configmap_namespace: "{{ openshift_logging_mux_namespace }}"
configmap_file_names:
- - current_file: "fluent.conf"
- new_file: "{{ tempdir }}/fluent-mux.conf"
- - current_file: "secure-forward.conf"
- new_file: "{{ tempdir }}/secure-forward-mux.conf"
+ - current_file: "fluent.conf"
+ new_file: "{{ tempdir }}/fluent-mux.conf"
+ - current_file: "secure-forward.conf"
+ new_file: "{{ tempdir }}/secure-forward-mux.conf"
- name: Set Mux configmap
oc_configmap:
@@ -123,14 +130,14 @@
name: logging-mux
namespace: "{{ openshift_logging_mux_namespace }}"
files:
- - name: ca
- path: "{{ generated_certs_dir }}/ca.crt"
- - name: key
- path: "{{ generated_certs_dir }}/system.logging.mux.key"
- - name: cert
- path: "{{ generated_certs_dir }}/system.logging.mux.crt"
- - name: shared_key
- path: "{{ generated_certs_dir }}/mux_shared_key"
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.mux.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.mux.crt"
+ - name: shared_key
+ path: "{{ generated_certs_dir }}/mux_shared_key"
# services
- name: Set logging-mux service for external communication
@@ -144,11 +151,11 @@
labels:
logging-infra: 'support'
ports:
- - name: mux-forward
- port: "{{ openshift_logging_mux_port }}"
- targetPort: "mux-forward"
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
external_ips:
- - "{{ openshift_logging_mux_external_address }}"
+ - "{{ openshift_logging_mux_external_address }}"
when: openshift_logging_mux_allow_external | bool
- name: Set logging-mux service for internal communication
@@ -162,9 +169,9 @@
labels:
logging-infra: 'support'
ports:
- - name: mux-forward
- port: "{{ openshift_logging_mux_port }}"
- targetPort: "mux-forward"
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
when: not openshift_logging_mux_allow_external | bool
# create Mux DC
@@ -199,7 +206,7 @@
selector: "{{ openshift_logging_mux_file_buffer_pvc_pv_selector }}"
storage_class_name: "{{ openshift_logging_mux_file_buffer_pvc_storage_class_name | default('', true) }}"
when:
- - openshift_logging_mux_file_buffer_storage_type == "pvc"
+ - openshift_logging_mux_file_buffer_storage_type == "pvc"
- name: Set logging-mux DC
oc_obj:
@@ -208,7 +215,7 @@
namespace: "{{ openshift_logging_mux_namespace }}"
kind: dc
files:
- - "{{ tempdir }}/templates/logging-mux-dc.yaml"
+ - "{{ tempdir }}/templates/logging-mux-dc.yaml"
delete_after: true
- name: Add mux namespaces
diff --git a/roles/openshift_logging_mux/vars/openshift-enterprise.yml b/roles/openshift_logging_mux/vars/openshift-enterprise.yml
index 1e7eb9d8d..a72459748 100644
--- a/roles/openshift_logging_mux/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging_mux/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_mux_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_mux_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 158e596ec..e0b37ac26 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that Cassandra has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Cassandra - '{{ openshift_metrics_cassandra_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_metrics_cassandra_nodeselector)
+
- shell: >
{{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
index f45e7a042..de4e89a01 100644
--- a/roles/openshift_metrics/tasks/install_hawkular.yaml
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that Hawkular has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Hawkular - '{{ openshift_metrics_hawkular_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_metrics_hawkular_nodeselector)
+
- command: >
{{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index 73e7454f0..e4ddf98ff 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that Heapster has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Heapster - '{{ openshift_metrics_heapster_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_metrics_heapster_nodeselector)
+
- command: >
{{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
@@ -20,8 +27,8 @@
- set_fact:
heapster_sa_secrets: "{{ heapster_sa_secrets + [item] }}"
with_items:
- - hawkular-metrics-certs
- - hawkular-metrics-account
+ - hawkular-metrics-certs
+ - hawkular-metrics-account
when: not openshift_metrics_heapster_standalone | bool
- name: Generating serviceaccount for heapster
@@ -38,7 +45,7 @@
vars:
obj_name: heapster
ports:
- - {port: 80, targetPort: http-endpoint}
+ - {port: 80, targetPort: http-endpoint}
selector:
name: "{{obj_name}}"
annotations:
@@ -61,9 +68,9 @@
kind: ClusterRole
name: cluster-reader
subjects:
- - kind: ServiceAccount
- name: heapster
- namespace: "{{ openshift_metrics_project }}"
+ - kind: ServiceAccount
+ name: heapster
+ namespace: "{{ openshift_metrics_project }}"
changed_when: no
- include_tasks: generate_heapster_secrets.yaml
diff --git a/roles/openshift_metrics/tasks/install_hosa.yaml b/roles/openshift_metrics/tasks/install_hosa.yaml
index 7c9bc26d0..3624cb5ab 100644
--- a/roles/openshift_metrics/tasks/install_hosa.yaml
+++ b/roles/openshift_metrics/tasks/install_hosa.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that Hawkular agent has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Hawkular agent - '{{ openshift_metrics_hawkular_agent_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_metrics_hawkular_agent_nodeselector)
+
- name: Generate Hawkular Agent (HOSA) Cluster Role
template:
src: hawkular_openshift_agent_role.j2
@@ -38,7 +45,7 @@
kind: ClusterRole
name: hawkular-openshift-agent
subjects:
- - kind: ServiceAccount
- name: hawkular-openshift-agent
- namespace: "{{openshift_metrics_hawkular_agent_namespace}}"
+ - kind: ServiceAccount
+ name: hawkular-openshift-agent
+ namespace: "{{openshift_metrics_hawkular_agent_namespace}}"
changed_when: no
diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml
index 8704ddfa0..e7003db76 100644
--- a/roles/openshift_metrics/vars/default_images.yml
+++ b/roles/openshift_metrics/vars/default_images.yml
@@ -1,3 +1,3 @@
---
__openshift_metrics_image_prefix: "docker.io/openshift/origin-"
-__openshift_metrics_image_version: "latest"
+__openshift_metrics_image_version: "{{ openshift_image_tag }}"
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index 5a1728de5..b3b3552b7 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/"
-__openshift_metrics_image_version: "v3.7"
+__openshift_metrics_image_version: "{{ openshift_image_tag }}"
diff --git a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml
index a5545c81b..ea280640f 100644
--- a/roles/openshift_node/files/bootstrap.yml
+++ b/roles/openshift_node/files/bootstrap.yml
@@ -61,11 +61,3 @@
with_items:
- line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}"
regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
-
- - name: "Start the {{ openshift_service_type }}-node service"
- systemd:
- daemon_reload: yes
- state: restarted
- enabled: True
- name: "{{ openshift_service_type }}-node"
- no_block: true
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 2bdb81632..75f1300e1 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -55,7 +55,9 @@ openshift_openstack_app_subdomain: "apps"
# heat vars
openshift_openstack_clusterid: openshift
openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
-openshift_openstack_subnet_prefix: "192.168.99"
+openshift_openstack_subnet_cidr: "192.168.99.0/24"
+openshift_openstack_pool_start: "192.168.99.3"
+openshift_openstack_pool_end: "192.168.99.254"
openshift_openstack_master_hostname: master
openshift_openstack_infra_hostname: infra-node
openshift_openstack_cns_hostname: cns
@@ -94,6 +96,8 @@ openshift_openstack_etcd_volume_size: 2
openshift_openstack_lb_volume_size: 5
openshift_openstack_ephemeral_volumes: false
+# User commands for cloud-init to execute on all provisioned Nova servers
+openshift_openstack_provision_user_commands: []
# cloud-config
openshift_openstack_disable_root: true
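
Replacing openshift_openstack_subnet_prefix with an explicit CIDR plus allocation-pool bounds removes the /24-only assumption baked into the old str_replace blocks in heat_stack.yaml.j2. The new defaults reproduce the old behaviour, as a quick sketch with the stdlib ipaddress module shows (the addresses are simply the role defaults, not a recommendation):

import ipaddress

subnet_cidr = ipaddress.ip_network("192.168.99.0/24")
hosts = list(subnet_cidr.hosts())   # 192.168.99.1 .. 192.168.99.254
pool_start = hosts[2]               # 192.168.99.3, same as the old subnet_24_prefix.3
pool_end = hosts[-1]                # 192.168.99.254, same as the old subnet_24_prefix.254
print(subnet_cidr, pool_start, pool_end)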
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 1d3173022..b62cb2bc8 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -78,22 +78,10 @@ resources:
params:
cluster_id: {{ openshift_openstack_stack_name }}
network: { get_resource: net }
- cidr:
- str_replace:
- template: subnet_24_prefix.0/24
- params:
- subnet_24_prefix: {{ openshift_openstack_subnet_prefix }}
+ cidr: {{ openshift_openstack_subnet_cidr }}
allocation_pools:
- - start:
- str_replace:
- template: subnet_24_prefix.3
- params:
- subnet_24_prefix: {{ openshift_openstack_subnet_prefix }}
- end:
- str_replace:
- template: subnet_24_prefix.254
- params:
- subnet_24_prefix: {{ openshift_openstack_subnet_prefix }}
+ - start: {{ openshift_openstack_pool_start }}
+ end: {{ openshift_openstack_pool_end }}
dns_nameservers:
{% for nameserver in openshift_openstack_dns_nameservers %}
- {{ nameserver }}
@@ -261,7 +249,7 @@ resources:
protocol: tcp
port_range_min: 30000
port_range_max: 32767
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
+ remote_ip_prefix: "{{ openshift_openstack_subnet_cidr }}"
{% else %}
master-secgrp:
type: OS::Neutron::SecurityGroup
@@ -393,7 +381,7 @@ resources:
protocol: tcp
port_range_min: 30000
port_range_max: 32767
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
+ remote_ip_prefix: "{{ openshift_openstack_subnet_cidr }}"
{% endif %}
infra-secgrp:
diff --git a/roles/openshift_openstack/templates/user_data.j2 b/roles/openshift_openstack/templates/user_data.j2
index ccaa5d464..1ca87a429 100644
--- a/roles/openshift_openstack/templates/user_data.j2
+++ b/roles/openshift_openstack/templates/user_data.j2
@@ -11,3 +11,19 @@ write_files:
permissions: 440
content: |
Defaults:openshift !requiretty
+
+{% if openshift_openstack_provision_user_commands %}
+ - path: /root/ansible_install.sh
+ permissions: '0544'
+ content: |
+{% for cmd in openshift_openstack_provision_user_commands %}
+{% if cmd is string %}
+ {{ cmd }}
+{% elif cmd is iterable %}
+ {{ cmd|join(' ') }}
+{% endif %}
+{% endfor %}
+
+runcmd:
+ - /root/ansible_install.sh
+{% endif %}
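
The new block turns openshift_openstack_provision_user_commands into a /root/ansible_install.sh that cloud-init runs via runcmd, accepting either plain strings or argv-style lists. A minimal rendering sketch of just the command loop, using the jinja2 package (the commands are sample values):

from jinja2 import Template

# Reduced to the loop from user_data.j2; the real template indents the output
# under write_files and executes the file through runcmd.
loop = Template(
    "{% for cmd in openshift_openstack_provision_user_commands %}"
    "{% if cmd is string %}{{ cmd }}\n"
    "{% elif cmd is iterable %}{{ cmd | join(' ') }}\n"
    "{% endif %}{% endfor %}"
)

print(loop.render(openshift_openstack_provision_user_commands=[
    "yum -y install ansible",                                   # string form
    ["ansible-playbook", "-i", "/root/inventory", "site.yml"],  # list form
]))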
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 0b565502f..5a8228bc4 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -2,6 +2,13 @@
# set facts
- include_tasks: facts.yaml
+- name: Ensure that Prometheus has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Prometheus - '{{ openshift_prometheus_node_selector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_prometheus_node_selector)
+
# namespace
- name: Add prometheus project
oc_project:
@@ -17,12 +24,12 @@
name: "{{ item }}-proxy"
namespace: "{{ openshift_prometheus_namespace }}"
contents:
- - path: session_secret
- data: "{{ 43 | lib_utils_oo_random_word }}="
+ - path: session_secret
+ data: "{{ 43 | lib_utils_oo_random_word }}="
with_items:
- - prometheus
- - alerts
- - alertmanager
+ - prometheus
+ - alerts
+ - alertmanager
# serviceaccount
- name: create prometheus serviceaccount
@@ -62,10 +69,10 @@
oprometheus.io/scheme: https
service.alpha.openshift.io/serving-cert-secret-name: prometheus-tls
ports:
- - name: prometheus
- port: "{{ openshift_prometheus_service_port }}"
- targetPort: "{{ openshift_prometheus_service_targetport }}"
- protocol: TCP
+ - name: prometheus
+ port: "{{ openshift_prometheus_service_port }}"
+ targetPort: "{{ openshift_prometheus_service_targetport }}"
+ protocol: TCP
selector:
app: prometheus
@@ -78,10 +85,10 @@
annotations:
service.alpha.openshift.io/serving-cert-secret-name: alerts-tls
ports:
- - name: prometheus
- port: "{{ openshift_prometheus_service_port }}"
- targetPort: "{{ openshift_prometheus_alerts_service_targetport }}"
- protocol: TCP
+ - name: prometheus
+ port: "{{ openshift_prometheus_service_port }}"
+ targetPort: "{{ openshift_prometheus_alerts_service_targetport }}"
+ protocol: TCP
selector:
app: prometheus
@@ -94,10 +101,10 @@
annotations:
service.alpha.openshift.io/serving-cert-secret-name: alertmanager-tls
ports:
- - name: prometheus
- port: "{{ openshift_prometheus_service_port }}"
- targetPort: "{{ openshift_prometheus_alertmanager_service_targetport }}"
- protocol: TCP
+ - name: prometheus
+ port: "{{ openshift_prometheus_service_port }}"
+ targetPort: "{{ openshift_prometheus_alertmanager_service_targetport }}"
+ protocol: TCP
selector:
app: prometheus
@@ -112,12 +119,12 @@
service_name: "{{ item.name }}"
tls_termination: reencrypt
with_items:
- - name: prometheus
- host: "{{ openshift_prometheus_hostname }}"
- - name: alerts
- host: "{{ openshift_prometheus_alerts_hostname }}"
- - name: alertmanager
- host: "{{ openshift_prometheus_alertmanager_hostname }}"
+ - name: prometheus
+ host: "{{ openshift_prometheus_hostname }}"
+ - name: alerts
+ host: "{{ openshift_prometheus_alerts_hostname }}"
+ - name: alertmanager
+ host: "{{ openshift_prometheus_alertmanager_hostname }}"
# Storage
- name: create prometheus pvc
@@ -157,9 +164,9 @@
src: "{{ openshift_prometheus_additional_rules_file }}"
dest: "{{ tempdir }}/prometheus.additional.rules"
when:
- - openshift_prometheus_additional_rules_file is defined
- - openshift_prometheus_additional_rules_file is not none
- - openshift_prometheus_additional_rules_file | trim | length > 0
+ - openshift_prometheus_additional_rules_file is defined
+ - openshift_prometheus_additional_rules_file is not none
+ - openshift_prometheus_additional_rules_file | trim | length > 0
- stat:
path: "{{ tempdir }}/prometheus.additional.rules"
@@ -227,5 +234,5 @@
namespace: "{{ openshift_prometheus_namespace }}"
kind: statefulset
files:
- - "{{ tempdir }}/templates/prometheus.yaml"
+ - "{{ tempdir }}/templates/prometheus.yaml"
delete_after: true
diff --git a/roles/openshift_prometheus/vars/openshift-enterprise.yml b/roles/openshift_prometheus/vars/openshift-enterprise.yml
index 9bb4c99bb..02319867d 100644
--- a/roles/openshift_prometheus/vars/openshift-enterprise.yml
+++ b/roles/openshift_prometheus/vars/openshift-enterprise.yml
@@ -6,7 +6,7 @@ l_openshift_prometheus_alertmanager_image_prefix: "{{ openshift_prometheus_alter
l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertbuffer_image_prefix | default(l_openshift_prometheus_image_prefix) }}"
# image version defaults
-l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v3.7') }}"
-l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v3.7') }}"
-l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v3.7') }}"
-l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v3.7') }}"
+l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default(openshift_image_tag) }}"
+l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default(openshift_image_tag) }}"
+l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default(openshift_image_tag) }}"
+l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default(openshift_image_tag) }}"
diff --git a/roles/openshift_provisioners/tasks/install_provisioners.yaml b/roles/openshift_provisioners/tasks/install_provisioners.yaml
index 2d1217c74..1be498489 100644
--- a/roles/openshift_provisioners/tasks/install_provisioners.yaml
+++ b/roles/openshift_provisioners/tasks/install_provisioners.yaml
@@ -15,6 +15,13 @@
fail: msg='the openshift_provisioners_efs_aws_secret_access_key variable is required'
when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined
+- name: Ensure that provisioners have nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for the provisioners - '{{ openshift_provisioners_efs_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_provisioners_efs_nodeselector)
+
- name: Install support
include_tasks: install_support.yaml
@@ -34,10 +41,10 @@
- name: Create objects
include_tasks: oc_apply.yaml
vars:
- - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- - namespace: "{{ openshift_provisioners_project }}"
- - file_name: "{{ file.source }}"
- - file_content: "{{ file.content | b64decode | from_yaml }}"
+ - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ - namespace: "{{ openshift_provisioners_project }}"
+ - file_name: "{{ file.source }}"
+ - file_content: "{{ file.content | b64decode | from_yaml }}"
with_items: "{{ object_defs.results }}"
loop_control:
loop_var: file
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 4d06c1872..96fa4a93e 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -1,6 +1,5 @@
---
# do any asserts here
-
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
register: mktemp
@@ -9,8 +8,8 @@
- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
- name: Set service_catalog image facts
set_fact:
@@ -25,20 +24,20 @@
- when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'
block:
- - name: Waiting for netnamespace kube-service-catalog to be ready
- oc_obj:
- kind: netnamespace
- name: kube-service-catalog
- state: list
- register: get_output
- until: not get_output.results.stderr is defined
- retries: 30
- delay: 1
- changed_when: false
-
- - name: Make kube-service-catalog project network global
- command: >
- {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
+ - name: Waiting for netnamespace kube-service-catalog to be ready
+ oc_obj:
+ kind: netnamespace
+ name: kube-service-catalog
+ state: list
+ register: get_output
+ until: not get_output.results.stderr is defined
+ retries: 30
+ delay: 1
+ changed_when: false
+
+ - name: Make kube-service-catalog project network global
+ command: >
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
- include_tasks: generate_certs.yml
@@ -51,7 +50,7 @@
kind: template
namespace: "kube-service-catalog"
files:
- - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+ - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
- oc_process:
create: True
@@ -67,7 +66,7 @@
kind: template
namespace: kube-system
files:
- - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+ - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
- oc_process:
create: True
@@ -132,7 +131,7 @@
kind: daemonset
name: apiserver
files:
- - "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ - "{{ mktemp.stdout }}/service_catalog_api_server.yml"
delete_after: yes
- name: Set Service Catalog API Server service
@@ -141,10 +140,10 @@
namespace: kube-service-catalog
state: present
ports:
- - name: secure
- port: 443
- protocol: TCP
- targetPort: 6443
+ - name: secure
+ port: 443
+ protocol: TCP
+ targetPort: 6443
selector:
app: apiserver
session_affinity: None
@@ -160,7 +159,7 @@
kind: route
name: apiserver
files:
- - "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+ - "{{ mktemp.stdout }}/service_catalog_api_route.yml"
delete_after: yes
## controller manager
@@ -180,7 +179,7 @@
kind: daemonset
name: controller-manager
files:
- - "{{ mktemp.stdout }}/controller_manager.yml"
+ - "{{ mktemp.stdout }}/controller_manager.yml"
delete_after: yes
- name: Set Controller Manager service
@@ -189,9 +188,9 @@
namespace: kube-service-catalog
state: present
ports:
- - port: 6443
- protocol: TCP
- targetPort: 6443
+ - port: 6443
+ protocol: TCP
+ targetPort: 6443
selector:
app: controller-manager
session_affinity: None
diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml
index 84e542eaf..687d07abd 100644
--- a/roles/openshift_service_catalog/tasks/start_api_server.yml
+++ b/roles/openshift_service_catalog/tasks/start_api_server.yml
@@ -17,6 +17,6 @@
warn: no
register: api_health
until: api_health.stdout == 'ok'
- retries: 120
- delay: 1
+ retries: 60
+ delay: 5
changed_when: false
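
The same retry change is applied to the web console and template service broker health checks below: 120 retries with a 1-second delay allowed roughly 120 s of waiting between attempts, while 60 retries at 5 seconds stretch that to about 300 s, giving slow API server rollouts more headroom while polling less often.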
diff --git a/roles/openshift_service_catalog/vars/default_images.yml b/roles/openshift_service_catalog/vars/default_images.yml
index 6fb9d1b86..b3bb16f18 100644
--- a/roles/openshift_service_catalog/vars/default_images.yml
+++ b/roles/openshift_service_catalog/vars/default_images.yml
@@ -1,3 +1,3 @@
---
__openshift_service_catalog_image_prefix: "docker.io/openshift/origin-"
-__openshift_service_catalog_image_version: "latest"
+__openshift_service_catalog_image_version: "{{ openshift_service_catalog_image_version | default(openshift_image_tag) }}"
diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
index cab9cc7d8..7f56292a2 100644
--- a/roles/openshift_service_catalog/vars/openshift-enterprise.yml
+++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/ose-"
-__openshift_service_catalog_image_version: "v3.7"
+__openshift_service_catalog_image_version: "{{ openshift_service_catalog_image_version | default(openshift_image_tag) }}"
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index f79a05c94..ab6613567 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -3,8 +3,8 @@
- name: Set default image variables based on deployment type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
- name: Set openshift_web_console facts
set_fact:
@@ -19,7 +19,8 @@
name: openshift-web-console
state: present
node_selector:
- - ""
+ - ""
+ register: create_console_project
- name: Make temp directory for web console templates
command: mktemp -d /tmp/console-ansible-XXXXXX
@@ -36,9 +37,9 @@
src: "{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- - "{{ __console_template_file }}"
- - "{{ __console_rbac_file }}"
- - "{{ __console_config_file }}"
+ - "{{ __console_template_file }}"
+ - "{{ __console_rbac_file }}"
+ - "{{ __console_config_file }}"
# Check if an existing webconsole-config config map exists. If so, use those
# contents so we don't overwrite changes.
@@ -61,69 +62,69 @@
# Generate a new config when a config map is not defined.
- when: existing_config_map_data['webconsole-config.yaml'] is not defined
block:
- # Migrate the previous master-config.yaml asset config if it exists into the new
- # web console config config map.
- - name: Read existing assetConfig in master-config.yaml
- slurp:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- register: master_config_output
-
- - set_fact:
- config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}"
-
- - set_fact:
- cro_plugin_enabled: "{{ config_to_migrate.admissionConfig is defined and config_to_migrate.admissionConfig.pluginConfig is defined and config_to_migrate.admissionConfig.pluginConfig.ClusterResourceOverrides is defined }}"
-
- # Update properties in the config template based on inventory vars when the
- # asset config does not exist.
- - name: Set web console config properties from inventory variables
- yedit:
- src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- edits:
- - key: clusterInfo#consolePublicURL
- # Must have a trailing slash
- value: "{{ openshift.master.public_console_url }}/"
- - key: clusterInfo#masterPublicURL
- value: "{{ openshift.master.public_api_url }}"
- - key: clusterInfo#logoutPublicURL
- value: "{{ openshift.master.logout_url | default('') }}"
- - key: features#inactivityTimeoutMinutes
- value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
- - key: features#clusterResourceOverridesEnabled
- value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
- - key: extensions#scriptURLs
- value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
- - key: extensions#stylesheetURLs
- value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
- - key: extensions#properties
- value: "{{ openshift_web_console_extension_properties | default({}) }}"
- separator: '#'
- state: present
- when: config_to_migrate.assetConfig is not defined
-
- - name: Migrate assetConfig from master-config.yaml
- yedit:
- src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- edits:
- - key: clusterInfo#consolePublicURL
- value: "{{ config_to_migrate.assetConfig.publicURL }}"
- - key: clusterInfo#masterPublicURL
- value: "{{ config_to_migrate.assetConfig.masterPublicURL }}"
- - key: clusterInfo#logoutPublicURL
- value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}"
- - key: clusterInfo#metricsPublicURL
- value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}"
- - key: clusterInfo#loggingPublicURL
- value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}"
- - key: servingInfo#maxRequestsInFlight
- value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}"
- - key: servingInfo#requestTimeoutSeconds
- value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}"
- - key: features#clusterResourceOverridesEnabled
- value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
- separator: '#'
- state: present
- when: config_to_migrate.assetConfig is defined
+ # Migrate the previous master-config.yaml asset config if it exists into the new
+ # web console config config map.
+ - name: Read existing assetConfig in master-config.yaml
+ slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: master_config_output
+
+ - set_fact:
+ config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}"
+
+ - set_fact:
+ cro_plugin_enabled: "{{ config_to_migrate.admissionConfig is defined and config_to_migrate.admissionConfig.pluginConfig is defined and config_to_migrate.admissionConfig.pluginConfig.ClusterResourceOverrides is defined }}"
+
+ # Update properties in the config template based on inventory vars when the
+ # asset config does not exist.
+ - name: Set web console config properties from inventory variables
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: features#inactivityTimeoutMinutes
+ value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+ - key: features#clusterResourceOverridesEnabled
+ value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
+ - key: extensions#scriptURLs
+ value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
+ - key: extensions#stylesheetURLs
+ value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
+ - key: extensions#properties
+ value: "{{ openshift_web_console_extension_properties | default({}) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is not defined
+
+ - name: Migrate assetConfig from master-config.yaml
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ value: "{{ config_to_migrate.assetConfig.publicURL }}"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ config_to_migrate.assetConfig.masterPublicURL }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}"
+ - key: clusterInfo#metricsPublicURL
+ value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}"
+ - key: clusterInfo#loggingPublicURL
+ value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}"
+ - key: servingInfo#maxRequestsInFlight
+ value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}"
+ - key: servingInfo#requestTimeoutSeconds
+ value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}"
+ - key: features#clusterResourceOverridesEnabled
+ value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is defined
- slurp:
src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
@@ -144,6 +145,16 @@
--config={{ mktemp.stdout }}/admin.kubeconfig
| {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
+# Wait to give the rollout time to start before verifying that the console is
+# running. Unfortunately, we can't check if the deployment revision changed
+# because it's possible applying the template did not result in any changes to
+# the pod template spec, which would skip a new revision.
+- name: Pause for the web console deployment to start
+ pause:
+ seconds: 30
+ # Skip if the project didn't exist since there was no previous deployment.
+ when: not create_console_project.changed
+
- name: Verify that the web console is running
command: >
curl -k https://webconsole.openshift-web-console.svc/healthz
@@ -153,8 +164,8 @@
warn: no
register: console_health
until: console_health.stdout == 'ok'
- retries: 120
- delay: 1
+ retries: 60
+ delay: 5
changed_when: false
- name: Remove temp directory
diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml
index 42d331ac5..0bd4e981d 100644
--- a/roles/openshift_web_console/vars/default_images.yml
+++ b/roles/openshift_web_console/vars/default_images.yml
@@ -1,4 +1,4 @@
---
__openshift_web_console_prefix: "docker.io/openshift/origin-"
-__openshift_web_console_version: "latest"
+__openshift_web_console_version: "{{ openshift_image_tag }}"
__openshift_web_console_image_name: "web-console"
diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml
index 375c22067..fd5e4e9a0 100644
--- a/roles/openshift_web_console/vars/openshift-enterprise.yml
+++ b/roles/openshift_web_console/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/ose-"
-__openshift_web_console_version: "v3.9"
+__openshift_web_console_version: "{{ openshift_image_tag }}"
__openshift_web_console_image_name: "web-console"
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index d0a07c48d..d674d24e4 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -1,10 +1,17 @@
---
# Fact setting
+- name: Ensure that Template Service Broker has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Template Service Broker - '{{ template_service_broker_selector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(template_service_broker_selector)
+
- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
- name: set template_service_broker facts
set_fact:
@@ -16,7 +23,7 @@
name: openshift-template-service-broker
state: present
node_selector:
- - ""
+ - ""
- command: mktemp -d /tmp/tsb-ansible-XXXXXX
register: mktemp
@@ -31,10 +38,10 @@
src: "{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- - "{{ __tsb_template_file }}"
- - "{{ __tsb_rbac_file }}"
- - "{{ __tsb_broker_file }}"
- - "{{ __tsb_config_file }}"
+ - "{{ __tsb_template_file }}"
+ - "{{ __tsb_rbac_file }}"
+ - "{{ __tsb_broker_file }}"
+ - "{{ __tsb_config_file }}"
- yedit:
src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}"
@@ -71,8 +78,8 @@
warn: no
register: api_health
until: api_health.stdout == 'ok'
- retries: 120
- delay: 1
+ retries: 60
+ delay: 5
changed_when: false
- set_fact:
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
index dc164a4db..b99f52bbb 100644
--- a/roles/template_service_broker/vars/default_images.yml
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -1,4 +1,4 @@
---
__template_service_broker_prefix: "docker.io/openshift/origin-"
-__template_service_broker_version: "latest"
+__template_service_broker_version: "{{ openshift_image_tag }}"
__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
index b65b97691..4bc3795bd 100644
--- a/roles/template_service_broker/vars/openshift-enterprise.yml
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
__template_service_broker_prefix: "registry.access.redhat.com/openshift3/ose-"
-__template_service_broker_version: "v3.7"
+__template_service_broker_version: "{{ openshift_image_tag }}"
__template_service_broker_image_name: "template-service-broker"