-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  callback_plugins/aa_version_requirement.py | 2
-rw-r--r--  files/origin-components/apiserver-template.yaml | 3
-rw-r--r--  openshift-ansible.spec | 12
-rw-r--r--  playbooks/adhoc/uninstall.yml | 14
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 2
-rw-r--r--  playbooks/init/facts.yml | 6
-rw-r--r--  playbooks/init/main.yml | 2
-rw-r--r--  playbooks/init/repos.yml | 8
-rw-r--r--  playbooks/init/validate_hostnames.yml | 34
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_image_members.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_rpm_members.yml | 2
-rw-r--r--  playbooks/openshift-node/private/network_manager.yml | 2
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 3
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 4
-rw-r--r--  playbooks/prerequisites.yml | 2
-rw-r--r--  requirements.txt | 2
-rw-r--r--  roles/calico/handlers/main.yml | 2
-rw-r--r--  roles/cockpit/tasks/main.yml | 2
-rw-r--r--  roles/container_runtime/handlers/main.yml | 2
-rw-r--r--  roles/container_runtime/tasks/common/syscontainer_packages.yml | 6
-rw-r--r--  roles/container_runtime/tasks/docker_sanity.yml | 23
-rw-r--r--  roles/container_runtime/tasks/docker_upgrade_check.yml | 17
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 13
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_docker.yml | 8
-rw-r--r--  roles/container_runtime/templates/crio.conf.j2 | 2
-rw-r--r--  roles/contiv/tasks/download_bins.yml | 2
-rw-r--r--  roles/contiv/tasks/netplugin.yml | 6
-rw-r--r--  roles/contiv/tasks/pkgMgrInstallers/centos-install.yml | 8
-rw-r--r--  roles/contiv_facts/tasks/fedora-install.yml | 2
-rw-r--r--  roles/etcd/tasks/auxiliary/drop_etcdctl.yml | 2
-rw-r--r--  roles/etcd/tasks/backup/backup.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/deploy_ca.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 8
-rw-r--r--  roles/etcd/tasks/system_container.yml | 8
-rw-r--r--  roles/etcd/tasks/upgrade/upgrade_image.yml | 2
-rw-r--r--  roles/etcd/tasks/upgrade/upgrade_rpm.yml | 2
-rw-r--r--  roles/flannel/handlers/main.yml | 4
-rw-r--r--  roles/flannel/tasks/main.yml | 2
-rw-r--r--  roles/nickhammond.logrotate/tasks/main.yml | 2
-rw-r--r--  roles/nuage_ca/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 4
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 4
-rw-r--r--  roles/openshift_clock/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 8
-rw-r--r--  roles/openshift_excluder/tasks/verify_excluder.yml | 2
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 2
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/handlers/main.yml | 13
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 21
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml | 35
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml | 35
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml | 2
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 2
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 2
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master/defaults/main.yml | 42
-rw-r--r--  roles/openshift_master/tasks/journald.yml | 2
-rw-r--r--  roles/openshift_master/tasks/main.yml | 24
-rw-r--r--  roles/openshift_master/tasks/set_loopback_context.yml | 4
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 2
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 2
-rw-r--r--  roles/openshift_master/vars/main.yml | 41
-rw-r--r--  roles/openshift_metrics/tasks/install_hawkular.yaml | 6
-rw-r--r--  roles/openshift_nfs/tasks/create_export.yml | 2
-rw-r--r--  roles/openshift_nfs/tasks/setup.yml | 4
-rw-r--r--  roles/openshift_node/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config.yml | 12
-rw-r--r--  roles/openshift_node/tasks/dnsmasq/no-network-manager.yml | 2
-rw-r--r--  roles/openshift_node/tasks/dnsmasq_install.yml | 2
-rw-r--r--  roles/openshift_node/tasks/install.yml | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 4
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 6
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 6
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/config_changes.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 4
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/stop_services.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade_pre.yml | 4
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 2
-rw-r--r--  roles/openshift_openstack/tasks/container-storage-setup.yml | 4
-rw-r--r--  roles/openshift_openstack/tasks/node-packages.yml | 4
-rw-r--r--  roles/openshift_openstack/tasks/populate-dns.yml | 2
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 9
-rw-r--r--  roles/openshift_repos/tasks/rhel_repos.yml | 34
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/kernel_modules.yml | 2
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 6
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 2
-rw-r--r--  roles/openshift_version/tasks/main.yml | 4
-rw-r--r--  roles/os_firewall/tasks/firewalld.yml | 12
-rw-r--r--  roles/os_firewall/tasks/iptables.yml | 10
-rw-r--r--  roles/os_update_latest/tasks/main.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 18
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 68
-rw-r--r--  roles/rhel_subscribe/tasks/satellite.yml | 5
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 1
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 3
117 files changed, 518 insertions, 312 deletions
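
Most of the hunks that follow apply one recurring conversion: result checks written as Jinja2 filters (result | success, result | failed, result | changed, result | skipped, x | version_compare(...)) become Ansible test expressions (result is succeeded, result is failed, result is changed, result is skipped, x is version_compare(...)), in line with the requirements.txt bump to ansible==2.4.1.0 and the move away from using tests as filters. A minimal before/after sketch of that pattern follows; the play, package name, and retry values are hypothetical and not taken from this commit:

---
# Illustrative only: the play, package name, and retry values are examples,
# not part of this diff.
- hosts: localhost
  gather_facts: false
  tasks:
  - name: Install a package, retrying until the result reports success
    package:
      name: example-package
      state: present
    register: result
    # Deprecated filter spelling:       until: result | success
    # Test spelling used in this diff:  until: result is succeeded
    until: result is succeeded
    retries: 3
    delay: 10
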
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 4924f050b..1ca23082d 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.9.0 ./
+3.9.0-0.10.0 ./
diff --git a/callback_plugins/aa_version_requirement.py b/callback_plugins/aa_version_requirement.py
index 110b3d673..1093acdae 100644
--- a/callback_plugins/aa_version_requirement.py
+++ b/callback_plugins/aa_version_requirement.py
@@ -29,7 +29,7 @@ else:
# Set to minimum required Ansible version
-REQUIRED_VERSION = '2.4.0.0'
+REQUIRED_VERSION = '2.4.1.0'
DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
diff --git a/files/origin-components/apiserver-template.yaml b/files/origin-components/apiserver-template.yaml
index 1b42597af..035e4734b 100644
--- a/files/origin-components/apiserver-template.yaml
+++ b/files/origin-components/apiserver-template.yaml
@@ -15,6 +15,8 @@ parameters:
apiVersion: config.templateservicebroker.openshift.io/v1
templateNamespaces:
- openshift
+- name: NODE_SELECTOR
+ value: "{}"
objects:
# to create the tsb server
@@ -59,6 +61,7 @@ objects:
path: /healthz
port: 8443
scheme: HTTPS
+ nodeSelector: "${{NODE_SELECTOR}}"
volumes:
- name: serving-cert
secret:
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 943931572..0d5964dda 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.9.0%{?dist}
+Release: 0.10.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -285,6 +285,16 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Dec 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.10.0
+- Bump requirements.txt to Ansible 2.4.1 (rteague@redhat.com)
+- Commit to stabilize RHSM operations. This code is derived from contrib
+ (mazzystr@gmail.com)
+- Contiv systemd fixes (flamingo@2thebatcave.com)
+- Combine openshift_master/vars with defaults (mgugino@redhat.com)
+- crio: change socket path to /var/run/crio/crio.sock (gscrivan@redhat.com)
+- Remove version requirement from openvswitch package, since listed version got
+ removed from repo (riffraff@hobbes.alephone.org)
+
* Thu Dec 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.9.0
- etcd: use Fedora /latest/ instead of hardcoding the version
(gscrivan@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 9f044c089..584117e6b 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -126,13 +126,13 @@
- tuned-profiles-atomic-openshift-node
- tuned-profiles-origin-node
register: result
- until: result | success
+ until: result is succeeded
- name: Remove flannel package
package: name=flannel state=absent
when: openshift_use_flannel | default(false) | bool
register: result
- until: result | success
+ until: result is succeeded
when: not is_atomic | bool
- shell: systemctl reset-failed
@@ -286,9 +286,9 @@
- name: restart docker
service: name=docker state=stopped enabled=no
failed_when: false
- when: not (container_engine | changed)
+ when: not (container_engine is changed)
register: l_docker_restart_docker_in_pb_result
- until: not l_docker_restart_docker_in_pb_result | failed
+ until: not (l_docker_restart_docker_in_pb_result is failed)
retries: 3
delay: 30
@@ -384,7 +384,7 @@
- origin-docker-excluder
- origin-master
register: result
- until: result | success
+ until: result is succeeded
- shell: systemctl reset-failed
changed_when: False
@@ -499,7 +499,7 @@
- etcd
- etcd3
register: result
- until: result | success
+ until: result is succeeded
- shell: systemctl reset-failed
changed_when: False
@@ -558,7 +558,7 @@
with_items:
- haproxy
register: result
- until: result | success
+ until: result is succeeded
- shell: systemctl reset-failed
changed_when: False
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 5a877809a..dc9d0a139 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -8,6 +8,7 @@
when:
- deployment_type == 'openshift-enterprise'
- ansible_distribution == "RedHat"
- - lookup('env', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false']
+ - rhsub_user is defined
+ - rhsub_pass is defined
- role: openshift_repos
- role: os_update_latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 38aa9df47..42cd51bd9 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -46,7 +46,7 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
@@ -58,7 +58,7 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
- until: not l_docker_upgrade_drain_result | failed
+ until: not (l_docker_upgrade_drain_result is failed)
retries: 60
delay: 60
@@ -73,5 +73,5 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index dbc4f39c7..385a141ea 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -2,7 +2,7 @@
- name: Restart docker
service: name=docker state=restarted
register: l_docker_restart_docker_in_upgrade_result
- until: not l_docker_restart_docker_in_upgrade_result | failed
+ until: not (l_docker_restart_docker_in_upgrade_result is failed)
retries: 3
delay: 30
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 4856a4b51..b5000d3a1 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -35,14 +35,14 @@
name: docker
state: stopped
register: l_pb_docker_upgrade_stop_result
- until: not l_pb_docker_upgrade_stop_result | failed
+ until: not (l_pb_docker_upgrade_stop_result is failed)
retries: 3
delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
register: result
- until: result | success
+ until: result is succeeded
- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 344ddea3c..50df8a890 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -126,7 +126,7 @@
debug:
msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
when:
- - not grep_plugin_order_override | skipped
+ - not (grep_plugin_order_override is skipped)
- grep_plugin_order_override.rc == 0
- name: Warn if shared-resource-viewer could not be updated
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 2ab9f852c..3fc18c9b7 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -20,13 +20,17 @@
msg: >
openshift_pkg_version is {{ openshift_pkg_version }} which is not a
valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+ when:
+ - openshift_pkg_version is defined
+ - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
- fail:
msg: >
openshift_image_tag is {{ openshift_image_tag }} which is not a
valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+ when:
+ - openshift_image_tag is defined
+ - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
- set_fact:
openshift_release: "{{ openshift_release[1:] }}"
@@ -36,7 +40,9 @@
msg: >
openshift_release is {{ openshift_release }} which is not a
valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
+ when:
+ - openshift_release is defined
+ - not (openshift_release is version_compare(openshift_upgrade_target ,'='))
- name: Verify master processes
hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 96f970506..065a9a8ab 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -43,11 +43,11 @@
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<')
+ - (openshift_pkg_version | default('-0.0', True)).split('-')[1] is version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minium requirement for Origin upgrade
fail:
msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
when:
- deployment_type == 'origin'
- - openshift.common.version | version_compare(openshift_upgrade_min,'<')
+ - openshift.common.version is version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 7b82fe05b..44724e979 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -91,7 +91,7 @@
register: l_pb_upgrade_control_plane_post_upgrade_storage
when:
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- - openshift_version | version_compare('3.7','<')
+ - openshift_version is version_compare('3.7','<')
failed_when:
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
@@ -136,7 +136,7 @@
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
- when: openshift_version | version_compare('3.7','<')
+ when: openshift_version is version_compare('3.7','<')
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -151,7 +151,7 @@
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: openshift_version | version_compare('3.7','<')
+ when: openshift_version is version_compare('3.7','<')
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -167,9 +167,9 @@
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
when:
- - openshift_version | version_compare('3.7','<')
+ - openshift_version is version_compare('3.7','<')
- - when: openshift_upgrade_target | version_compare('3.7','<')
+ - when: openshift_upgrade_target is version_compare('3.7','<')
block:
- name: Retrieve shared-resource-viewer
oc_obj:
@@ -287,14 +287,14 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
- name: Drain Node for Kubelet upgrade
command: >
{{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
- until: not l_upgrade_control_plane_drain_result | failed
+ until: not (l_upgrade_control_plane_drain_result is failed)
retries: 60
delay: 60
@@ -314,5 +314,5 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index a3cb1d0f9..956ad0d53 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -31,14 +31,14 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
- name: Drain Node for Kubelet upgrade
command: >
{{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
- until: not l_upgrade_nodes_drain_result | failed
+ until: not (l_upgrade_nodes_drain_result is failed)
retries: 60
delay: 60
@@ -56,8 +56,8 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
- name: Re-enable excluders
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 4fc897a57..e8c0f361a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -32,7 +32,7 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
- name: Drain nodes
hosts: oo_sg_current_nodes
@@ -49,11 +49,11 @@
--timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
- until: not l_upgrade_nodes_drain_result | failed
+ until: not (l_upgrade_nodes_drain_result is failed)
retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
delay: 5
failed_when:
- - l_upgrade_nodes_drain_result | failed
+ - l_upgrade_nodes_drain_result is failed
- openshift_upgrade_nodes_drain_timeout | default(0) == '0'
# Alright, let's clean up!
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 74d0cd8ad..c8c87a9c3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -16,7 +16,7 @@
command: >
{{ openshift.common.client_binary }} adm migrate authorization
when:
- - openshift_currently_installed_version | version_compare('3.7','<')
+ - openshift_currently_installed_version is version_compare('3.7','<')
- openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
changed_when: false
register: l_oc_result
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
index d41f365dc..4d40e472c 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/facts.yml
@@ -69,7 +69,7 @@
- name: assert atomic host docker version is 1.12 or later
assert:
that:
- - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+ - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
- when:
@@ -85,7 +85,7 @@
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- yum-utils
register: result
- until: result | success
+ until: result is succeeded
- name: Ensure various deps for running system containers are installed
package:
@@ -103,7 +103,7 @@
or (openshift_use_node_system_container | default(False)) | bool
or (openshift_use_master_system_container | default(False)) | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index b2b972a7d..06e8ba504 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -23,8 +23,6 @@
- import_playbook: validate_hostnames.yml
when: not (skip_validate_hostnames | default(False))
-- import_playbook: repos.yml
-
- import_playbook: version.yml
when: not (skip_verison | default(False))
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index a7114fc80..66786a41a 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -3,6 +3,14 @@
hosts: oo_all_hosts
gather_facts: no
tasks:
+ - name: subscribe instances to Red Hat Subscription Manager
+ include_role:
+ name: rhel_subscribe
+ when:
+ - ansible_distribution == 'RedHat'
+ - deployment_type == 'openshift-enterprise'
+ - rhsub_user is defined
+ - rhsub_pass is defined
- name: initialize openshift repos
include_role:
name: openshift_repos
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index be2e6a15a..86e0b2416 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -1,6 +1,7 @@
---
- name: Validate node hostnames
hosts: oo_nodes_to_config
+ any_errors_fatal: true
tasks:
- name: Query DNS for IP address of {{ openshift.common.hostname }}
shell:
@@ -8,16 +9,35 @@
register: lookupip
changed_when: false
failed_when: false
- - name: Warn user about bad openshift_hostname values
- pause:
- prompt:
+
+ - name: Validate openshift_hostname when defined
+ fail:
+ msg: >
The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
doesn't resolve to an IP address owned by this host. Please set
openshift_hostname variable to a hostname that when resolved on the host
- in question resolves to an IP address matching an interface on this
- host. This host will fail liveness checks for pods utilizing hostPorts,
- press ENTER to continue or CTRL-C to abort.
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+ in question resolves to an IP address matching an interface on this host.
+ This will ensure proper functionality of OpenShift networking features.
+ Inventory setting: openshift_hostname={{ openshift_hostname }}
+ This check can be overridden by setting openshift_hostname_check=false in
+ the inventory.
+ See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
when:
- lookupip.stdout != '127.0.0.1'
- lookupip.stdout not in ansible_all_ipv4_addresses
+ - openshift_hostname_check | default(true)
+
+ - name: Validate openshift_ip exists on node when defined
+ fail:
+ msg: >
+ The IP address {{ openshift_ip }} does not exist on {{ ansible_nodename }}.
+ Please set the openshift_ip variable to an IP address of this node.
+ This will ensure proper functionality of OpenShift networking features.
+ Inventory setting: openshift_ip={{ openshift_ip }}
+ This check can be overridden by setting openshift_ip_check=false in
+ the inventory.
+ See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
+ when:
+ - openshift_ip is defined
+ - openshift_ip not in ansible_all_ipv4_addresses
+ - openshift_ip_check | default(true)
diff --git a/playbooks/openshift-etcd/private/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml
index c133c0201..339fc6b74 100644
--- a/playbooks/openshift-etcd/private/upgrade_image_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml
@@ -13,5 +13,5 @@
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
etcd_peer: "{{ openshift.common.hostname }}"
when:
- - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
+ - etcd_container_version | default('99') is version_compare(etcd_upgrade_version,'<')
- openshift.common.is_containerized | bool
diff --git a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
index 902c39d9c..327a35b09 100644
--- a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
@@ -13,6 +13,6 @@
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
etcd_peer: "{{ openshift.common.hostname }}"
when:
- - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
+ - etcd_rpm_version.stdout | default('99') is version_compare(etcd_upgrade_version, '<')
- ansible_distribution == 'RedHat'
- not openshift.common.is_containerized | bool
diff --git a/playbooks/openshift-node/private/network_manager.yml b/playbooks/openshift-node/private/network_manager.yml
index 39640345f..2638c5223 100644
--- a/playbooks/openshift-node/private/network_manager.yml
+++ b/playbooks/openshift-node/private/network_manager.yml
@@ -8,7 +8,7 @@
name: 'NetworkManager'
state: present
register: result
- until: result | success
+ until: result is succeeded
- name: configure NetworkManager
lineinfile:
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 0786bd7d3..c2092b23c 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -5,6 +5,7 @@
roles:
- lib_openshift
+ - openshift_facts
tasks:
- name: Restart docker
@@ -12,7 +13,7 @@
name: docker
state: restarted
register: l_docker_restart_docker_in_node_result
- until: not l_docker_restart_docker_in_node_result | failed
+ until: not (l_docker_restart_docker_in_node_result is failed)
retries: 3
delay: 30
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index 583e72b51..0a69952df 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -51,8 +51,8 @@
- role: rhel_subscribe
when:
- ansible_distribution == "RedHat"
- - rhsub_user | default(False)
- - rhsub_pass | default(False)
+ - rhsub_user is defined
+ - rhsub_pass is defined
tasks:
- name: Install dependencies
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 7b7868cfe..5ba62a6d6 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -3,6 +3,8 @@
vars:
skip_verison: True
+- import_playbook: init/repos.yml
+
# This is required for container runtime for crio, only needs to run once.
- name: Configure os_firewall
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
diff --git a/requirements.txt b/requirements.txt
index be1bde18e..67cdaaff5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
# Versions are pinned to prevent pypi releases arbitrarily breaking
# tests with new APIs/semantics. We want to update versions deliberately.
-ansible==2.4.0.0
+ansible==2.4.1.0
boto==2.34.0
click==6.7
pyOpenSSL==16.2.0
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml
index 9cc0604a3..ed484c0dd 100644
--- a/roles/calico/handlers/main.yml
+++ b/roles/calico/handlers/main.yml
@@ -9,6 +9,6 @@
name: "{{ openshift_docker_service_name }}"
state: restarted
register: l_docker_restart_docker_in_calico_result
- until: not l_docker_restart_docker_in_calico_result | failed
+ until: not (l_docker_restart_docker_in_calico_result is failed)
retries: 3
delay: 30
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index f63b3e49b..fc13afed3 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -12,7 +12,7 @@
- "{{ cockpit_plugins }}"
when: not openshift.common.is_containerized | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Enable cockpit-ws
systemd:
diff --git a/roles/container_runtime/handlers/main.yml b/roles/container_runtime/handlers/main.yml
index 67cd6d782..87c5de0e9 100644
--- a/roles/container_runtime/handlers/main.yml
+++ b/roles/container_runtime/handlers/main.yml
@@ -6,7 +6,7 @@
state: restarted
daemon_reload: yes
register: r_docker_restart_docker_result
- until: not r_docker_restart_docker_result | failed
+ until: not (r_docker_restart_docker_result is failed)
retries: 3
delay: 30
when: not docker_service_status_changed | default(false) | bool
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
index 715ed492d..b41122880 100644
--- a/roles/container_runtime/tasks/common/syscontainer_packages.yml
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -6,7 +6,7 @@
state: present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
# Used to pull and install the system container
- name: Ensure atomic is installed
@@ -15,7 +15,7 @@
state: present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
# At the time of writing the atomic command requires runc for it's own use. This
# task is here in the even that the atomic package ever removes the dependency.
@@ -25,4 +25,4 @@
state: present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
diff --git a/roles/container_runtime/tasks/docker_sanity.yml b/roles/container_runtime/tasks/docker_sanity.yml
index e62cf5505..bc4da1cce 100644
--- a/roles/container_runtime/tasks/docker_sanity.yml
+++ b/roles/container_runtime/tasks/docker_sanity.yml
@@ -5,23 +5,38 @@
- name: Error out if Docker pre-installed but too old
fail:
msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+ when:
+ - not (curr_docker_version is skipped)
+ - curr_docker_version.stdout != ''
+ - curr_docker_version.stdout is version_compare('1.9.1', '<')
+ - not (docker_version is defined)
- name: Error out if requested Docker is too old
fail:
msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
- when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+ when:
+ - docker_version is defined
+ - docker_version is version_compare('1.9.1', '<')
# If a docker_version was requested, sanity check that we can install or upgrade to it, and
# no downgrade is required.
- name: Fail if Docker version requested but downgrade is required
fail:
msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+ when:
+ - not (curr_docker_version is skipped)
+ - curr_docker_version.stdout != ''
+ - docker_version is defined
+ - curr_docker_version.stdout is version_compare(docker_version, '>')
# This involves an extremely slow migration process, users should instead run the
# Docker 1.10 upgrade playbook to accomplish this.
- name: Error out if attempting to upgrade Docker across the 1.10 boundary
fail:
msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+ when:
+ - not (curr_docker_version is skipped)
+ - curr_docker_version.stdout != ''
+ - curr_docker_version.stdout is version_compare('1.10', '<')
+ - docker_version is defined
+ - docker_version is version_compare('1.10', '>=')
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
index f29619f42..6731963dd 100644
--- a/roles/container_runtime/tasks/docker_upgrade_check.yml
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -19,7 +19,7 @@
command: "{{ repoquery_installed }} --qf '%{version}' docker"
register: curr_docker_version
retries: 4
- until: curr_docker_version | succeeded
+ until: curr_docker_version is succeeded
changed_when: false
- name: Get latest available version of Docker
@@ -27,7 +27,7 @@
{{ repoquery_cmd }} --qf '%{version}' "docker"
register: avail_docker_version
retries: 4
- until: avail_docker_version | succeeded
+ until: avail_docker_version is succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
when: pkg_check.rc == 0
failed_when: false
@@ -36,7 +36,10 @@
- fail:
msg: This playbook requires access to Docker 1.12 or later
# Disable the 1.12 requirement if the user set a specific Docker version
- when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
+ when:
+ - docker_version is not defined
+ - docker_upgrade is not defined or docker_upgrade | bool == True
+ - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
- set_fact:
@@ -50,7 +53,9 @@
- name: Flag for Docker upgrade if necessary
set_fact:
l_docker_upgrade: True
- when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+ when:
+ - pkg_check.rc == 0
+ - curr_docker_version.stdout is version_compare(docker_version,'<')
# Additional checks for Atomic hosts:
- name: Determine available Docker
@@ -64,4 +69,6 @@
- fail:
msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
+ when:
+ - openshift.common.is_atomic | bool
+ - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index 89899c9cf..d9d4037dd 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -6,7 +6,7 @@
when: not openshift.common.is_atomic | bool
register: curr_docker_version
retries: 4
- until: curr_docker_version | succeeded
+ until: curr_docker_version is succeeded
changed_when: false
# Some basic checks to ensure the role will complete
@@ -19,9 +19,12 @@
package:
name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
state: present
- when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
+ when:
+ - not (openshift.common.is_atomic | bool)
+ - not (curr_docker_version is skipped)
+ - not (curr_docker_version.stdout != '')
register: result
- until: result | success
+ until: result is succeeded
- block:
# Extend the default Docker service unit file when using iptables-services
@@ -137,11 +140,11 @@
state: started
daemon_reload: yes
register: r_docker_package_docker_start_result
- until: not r_docker_package_docker_start_result | failed
+ until: not (r_docker_package_docker_start_result is failed)
retries: 3
delay: 30
- set_fact:
- docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
+ docker_service_status_changed: "{{ (r_docker_package_docker_start_result is changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
- include_tasks: common/post.yml
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
index 10570fe34..639585367 100644
--- a/roles/container_runtime/tasks/systemcontainer_docker.yml
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -20,7 +20,7 @@
package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
# Make sure docker is disabled. Errors are ignored.
- name: Disable Docker
@@ -31,7 +31,7 @@
daemon_reload: yes
ignore_errors: True
register: r_docker_systemcontainer_docker_stop_result
- until: not r_docker_systemcontainer_docker_stop_result | failed
+ until: not (r_docker_systemcontainer_docker_stop_result is failed)
retries: 3
delay: 30
@@ -87,12 +87,12 @@
state: started
daemon_reload: yes
register: r_docker_systemcontainer_docker_start_result
- until: not r_docker_systemcontainer_docker_start_result | failed
+ until: not (r_docker_systemcontainer_docker_start_result is failed)
retries: 3
delay: 30
- set_fact:
- docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
+ docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result is changed }}"
# Since docker is running as a system container, docker login will fail to create
# credentials. Use alternate method if requiring authenticated registries.
diff --git a/roles/container_runtime/templates/crio.conf.j2 b/roles/container_runtime/templates/crio.conf.j2
index 3f066a17f..0a1ff2e0a 100644
--- a/roles/container_runtime/templates/crio.conf.j2
+++ b/roles/container_runtime/templates/crio.conf.j2
@@ -27,7 +27,7 @@ storage_option = [
[crio.api]
# listen is the path to the AF_LOCAL socket on which crio will listen.
-listen = "/var/run/crio.sock"
+listen = "/var/run/crio/crio.sock"
# stream_address is the IP address on which the stream server will listen
stream_address = ""
diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml
index 741c1d1da..831fd360a 100644
--- a/roles/contiv/tasks/download_bins.yml
+++ b/roles/contiv/tasks/download_bins.yml
@@ -9,7 +9,7 @@
name: bzip2
state: installed
register: result
- until: result | success
+ until: result is succeeded
- name: Download Bins | Download Contiv tar file
get_url:
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index cf92a8cc0..540f6e4bc 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -101,15 +101,15 @@
- name: systemd reload
command: systemctl daemon-reload
- when: docker_updated|changed
+ when: docker_updated is changed
- name: Docker | Restart docker
service:
name: "{{ openshift_docker_service_name }}"
state: restarted
- when: docker_updated|changed
+ when: docker_updated is changed
register: l_docker_restart_docker_in_contiv_result
- until: not l_docker_restart_docker_in_contiv_result | failed
+ until: not (l_docker_restart_docker_in_contiv_result is failed)
retries: 3
delay: 30
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 62b4716a3..53c5b4099 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -4,7 +4,7 @@
pkg=net-tools
state=latest
register: result
- until: result | success
+ until: result is succeeded
- name: PkgMgr RHEL/CentOS | Get openstack ocata rpm
get_url:
@@ -23,11 +23,11 @@
tags:
- ovs_install
register: result
- until: result | success
+ until: result is succeeded
- name: PkgMgr RHEL/CentOS | Install ovs
yum:
- pkg=openvswitch-2.5.0-2.el7.x86_64
+ pkg=openvswitch
state=present
environment:
http_proxy: "{{ http_proxy|default('') }}"
@@ -36,4 +36,4 @@
tags:
- ovs_install
register: result
- until: result | success
+ until: result is succeeded
diff --git a/roles/contiv_facts/tasks/fedora-install.yml b/roles/contiv_facts/tasks/fedora-install.yml
index a57f6eb19..932ff091a 100644
--- a/roles/contiv_facts/tasks/fedora-install.yml
+++ b/roles/contiv_facts/tasks/fedora-install.yml
@@ -4,7 +4,7 @@
name: dnf
state: installed
register: result
- until: result | success
+ until: result is succeeded
- name: Update repo cache
command: dnf update -y
diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index 603f2531f..ccfd9da14 100644
--- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -3,7 +3,7 @@
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Configure etcd profile.d aliases
template:
diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml
index 9da023dbd..acd1bb0bc 100644
--- a/roles/etcd/tasks/backup/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -44,7 +44,7 @@
- r_etcd_common_embedded_etcd | bool
- not l_ostree_booted.stat.exists | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Check selinux label of '{{ etcd_data_dir }}'
command: >
diff --git a/roles/etcd/tasks/certificates/deploy_ca.yml b/roles/etcd/tasks/certificates/deploy_ca.yml
index bd4dafafd..ebaff353b 100644
--- a/roles/etcd/tasks/certificates/deploy_ca.yml
+++ b/roles/etcd/tasks/certificates/deploy_ca.yml
@@ -7,7 +7,7 @@
delegate_to: "{{ etcd_ca_host }}"
run_once: true
register: result
- until: result | success
+ until: result is succeeded
- file:
path: "{{ item }}"
diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index f4726940a..deb2301d7 100644
--- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -5,7 +5,7 @@
state: present
when: not etcd_is_containerized | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Check status of etcd certificates
stat:
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index b2100801f..12e41667e 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -13,7 +13,7 @@
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
register: result
- until: result | success
+ until: result is succeeded
- include_tasks: drop_etcdctl.yml
when:
@@ -93,7 +93,9 @@
daemon_reload: yes
when: not l_is_etcd_system_container | bool
register: task_result
- failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ failed_when:
+ - task_result is failed
+ - ('could not' not in task_result.msg|lower)
- name: Install etcd container service file
template:
@@ -131,4 +133,4 @@
- name: Set fact etcd_service_status_changed
set_fact:
- etcd_service_status_changed: "{{ start_result | changed }}"
+ etcd_service_status_changed: "{{ start_result is changed }}"
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index ca8b6a707..e37652536 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -29,7 +29,9 @@
masked: no
daemon_reload: yes
register: task_result
- failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ failed_when:
+ - task_result is failed
+ - ('could not' not in task_result.msg|lower)
when: "'etcd' not in etcd_result.stdout"
- name: Disable etcd_container
@@ -39,7 +41,9 @@
enabled: no
daemon_reload: yes
register: task_result
- failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ failed_when:
+ - task_result is failed
+ - ('could not' not in task_result.msg|lower)
- name: Remove etcd_container.service
file:
diff --git a/roles/etcd/tasks/upgrade/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml
index 6e712ba74..13bb0faca 100644
--- a/roles/etcd/tasks/upgrade/upgrade_image.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_image.yml
@@ -45,7 +45,7 @@
state: latest
when: not l_ostree_booted.stat.exists | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Verify cluster is healthy
command: "{{ etcdctlv2 }} cluster-health"
diff --git a/roles/etcd/tasks/upgrade/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
index e98def46e..180ed4135 100644
--- a/roles/etcd/tasks/upgrade/upgrade_rpm.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
@@ -19,7 +19,7 @@
name: "{{ l_etcd_target_package }}"
state: latest
register: result
- until: result | success
+ until: result is succeeded
- lineinfile:
destfile: "{{ etcd_conf_file }}"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 705d39f9a..7d79bd3d4 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -9,7 +9,7 @@
name: "{{ openshift_docker_service_name }}"
state: restarted
register: l_docker_restart_docker_in_flannel_result
- until: not l_docker_restart_docker_in_flannel_result | failed
+ until: not (l_docker_restart_docker_in_flannel_result is failed)
retries: 3
delay: 30
@@ -18,6 +18,6 @@
name: "{{ openshift_service_type }}-node"
state: restarted
register: l_restart_node_result
- until: not l_restart_node_result | failed
+ until: not (l_restart_node_result is failed)
retries: 3
delay: 30
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index befe1b2e6..9b9250f31 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -4,7 +4,7 @@
package: name=flannel state=present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Set flannel etcd options
become: yes
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 32d3acb86..677f206ea 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -3,7 +3,7 @@
package: name=logrotate state=present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
- name: nickhammond.logrotate | Setup logrotate.d scripts
template:
diff --git a/roles/nuage_ca/tasks/main.yaml b/roles/nuage_ca/tasks/main.yaml
index 46929fa1f..d96d0d802 100644
--- a/roles/nuage_ca/tasks/main.yaml
+++ b/roles/nuage_ca/tasks/main.yaml
@@ -3,7 +3,7 @@
package: name=openssl state=present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Create CA directory
file: path="{{ nuage_ca_dir }}" state=directory
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index eb00f13db..ea4702248 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -13,13 +13,13 @@
state: present
when: not openshift.common.is_containerized | bool
register: install_result
- until: install_result | success
+ until: install_result is succeeded
delegate_to: "{{ openshift_ca_host }}"
run_once: true
- name: Reload generated facts
openshift_facts:
- when: hostvars[openshift_ca_host].install_result | changed
+ when: hostvars[openshift_ca_host].install_result is changed
- name: Create openshift_ca_config_dir if it does not exist
file:
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 888aa8f0c..68d82e436 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -3,7 +3,7 @@
package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
when: not openshift.common.is_containerized | bool
register: result
- until: result | success
+ until: result is succeeded
- block:
- name: Pull CLI Image
@@ -44,4 +44,4 @@
package: name=bash-completion state=present
when: not openshift.common.is_containerized | bool
register: result
- until: result | success
+ until: result is succeeded
diff --git a/roles/openshift_clock/tasks/main.yaml b/roles/openshift_clock/tasks/main.yaml
index 82c73b583..cdacdd042 100644
--- a/roles/openshift_clock/tasks/main.yaml
+++ b/roles/openshift_clock/tasks/main.yaml
@@ -10,7 +10,7 @@
- openshift_clock_enabled | bool
- chrony_installed.rc != 0
register: result
- until: result | success
+ until: result is succeeded
- name: Start and enable ntpd/chronyd
command: timedatectl set-ntp true
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index 3ac55894f..ad7c00d14 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -14,7 +14,7 @@
- r_openshift_excluder_enable_docker_excluder | bool
- ansible_pkg_mgr == "yum"
register: result
- until: result | success
+ until: result is succeeded
# For DNF we do not need the "*" and if we add it, it causes an error because
@@ -29,7 +29,7 @@
- r_openshift_excluder_enable_docker_excluder | bool
- ansible_pkg_mgr == "dnf"
register: result
- until: result | success
+ until: result is succeeded
- name: Install openshift excluder - yum
package:
@@ -39,7 +39,7 @@
- r_openshift_excluder_enable_openshift_excluder | bool
- ansible_pkg_mgr == "yum"
register: result
- until: result | success
+ until: result is succeeded
# For DNF we do not need the "*" and if we add it, it causes an error because
# it's not a valid pkg_spec
@@ -53,7 +53,7 @@
- r_openshift_excluder_enable_openshift_excluder | bool
- ansible_pkg_mgr == "dnf"
register: result
- until: result | success
+ until: result is succeeded
- set_fact:
r_openshift_excluder_install_ran: True
diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
index c35639c1b..4f5277fa2 100644
--- a/roles/openshift_excluder/tasks/verify_excluder.yml
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -29,4 +29,4 @@
msg: "Available {{ excluder }} version {{ excluder_version }} is higher than the upgrade target version"
when:
- excluder_version != ''
- - excluder_version.split('.')[0:2] | join('.') | version_compare(r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True)
+ - excluder_version.split('.')[0:2] | join('.') is version_compare(r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True)
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index b7acb0c5a..c7e21ba99 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -3,7 +3,7 @@
package: name=cloud-utils-growpart state=present
when: not openshift.common.is_containerized | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 79c5793d9..7d23ea6c8 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -6,7 +6,7 @@
package: name=haproxy state=present
when: not openshift.common.is_containerized | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Pull haproxy image
command: >
@@ -70,4 +70,4 @@
register: start_result
- set_fact:
- haproxy_start_result_changed: "{{ start_result | changed }}"
+ haproxy_start_result_changed: "{{ start_result is changed }}"
diff --git a/roles/openshift_logging_elasticsearch/handlers/main.yml b/roles/openshift_logging_elasticsearch/handlers/main.yml
new file mode 100644
index 000000000..fa56897d0
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+- name: "Restarting logging-{{ _cluster_component }} cluster"
+ listen: "restart elasticsearch"
+ include_tasks: restart_cluster.yml
+ with_items: "{{ _restart_logging_components }}"
+ loop_control:
+ loop_var: _cluster_component
+ when: not logging_elasticsearch_rollout_override | bool
+
+## Stop this from running more than once
+- set_fact:
+ logging_elasticsearch_rollout_override: True
+ listen: "restart elasticsearch"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 8f2050043..5fe683ae5 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -204,7 +204,21 @@
from_file:
elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+ register: es_config_creation
+ notify: "restart elasticsearch"
+- when: es_config_creation.changed | bool
+ block:
+ - set_fact:
+ _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
+
+ - shell: >
+ oc get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
+ register: _es_dcs
+
+ - set_fact:
+ _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [_es_dcs.stdout] | unique }}"
+ when: _es_dcs.stdout != ""
# secret
- name: Set ES secret
@@ -375,6 +389,13 @@
files:
- "{{ tempdir }}/templates/logging-es-dc.yml"
delete_after: true
+ register: es_dc_creation
+ notify: "restart elasticsearch"
+
+- set_fact:
+ _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
+ _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [es_deploy_name] | unique }}"
+ when: es_dc_creation.changed | bool
- name: Retrieving the cert to use when generating secrets for the {{ es_component }} component
slurp:
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
new file mode 100644
index 000000000..4a32453e3
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
@@ -0,0 +1,35 @@
+---
+## get all pods for the cluster
+- command: >
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ register: _cluster_pods
+
+- name: "Disable shard balancing for logging-{{ _cluster_component }} cluster"
+ command: >
+ oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }'
+ register: _disable_output
+ changed_when: "'\"acknowledged\":true' in _disable_output.stdout"
+ when: _cluster_pods.stdout_lines | count > 0
+
+- command: >
+ oc get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ register: _cluster_dcs
+
+## restart the node if its dc is in the list of nodes to restart
+- name: "Restart ES node {{ _es_node }}"
+ include_tasks: restart_es_node.yml
+ with_items: "{{ _restart_logging_nodes }}"
+ loop_control:
+ loop_var: _es_node
+ when: _es_node in _cluster_dcs.stdout
+
+## we may need a new first pod to run against -- fetch them all again
+- command: >
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ register: _cluster_pods
+
+- name: "Enable shard balancing for logging-{{ _cluster_component }} cluster"
+ command: >
+ oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
+ register: _enable_output
+ changed_when: "'\"acknowledged\":true' in _enable_output.stdout"
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
new file mode 100644
index 000000000..b07b232ce
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
@@ -0,0 +1,35 @@
+---
+- name: "Rolling out new pod(s) for {{ _es_node }}"
+ command: >
+ oc rollout latest {{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }}
+
+- name: "Waiting for {{ _es_node }} to finish scaling up"
+ oc_obj:
+ state: list
+ name: "{{ _es_node }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ kind: dc
+ register: _dc_output
+ until:
+ - _dc_output.results.results[0].status is defined
+ - _dc_output.results.results[0].status.readyReplicas is defined
+ - _dc_output.results.results[0].status.readyReplicas > 0
+ retries: 60
+ delay: 30
+
+- name: Getting name(s) of replica pod(s)
+ command: >
+ oc get pods -l deploymentconfig={{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ register: _pods
+
+- name: "Waiting for ES to be ready for {{ _es_node }}"
+ shell: >
+ oc exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- {{ __es_local_curl }} https://localhost:9200/_cat/health | cut -d' ' -f4
+ with_items: "{{ _pods.stdout.split(' ') }}"
+ loop_control:
+ loop_var: _pod
+ register: _pod_status
+ until: _pod_status.stdout in ['green', 'yellow']
+ retries: 60
+ delay: 5
+ changed_when: false
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index bf04094a3..cf6ee36bb 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -17,6 +17,7 @@ spec:
logging-infra: "{{logging_component}}"
strategy:
type: Recreate
+ triggers: []
template:
metadata:
name: "{{deploy_name}}"
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index 09e2ee4d0..c8e995146 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -5,6 +5,8 @@ __allowed_es_types: ["data-master", "data-client", "master", "client"]
__es_log_appenders: ['file', 'console']
__kibana_index_modes: ["unique", "shared_ops"]
+__es_local_curl: "curl -s --cacert /etc/elasticsearch/secret/admin-ca --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key"
+
# TODO: integrate these
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
es_node_quorum: "{{ openshift_logging_elasticsearch_replica_count | int/2 + 1 }}"
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index 10283316c..c6256cf49 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -130,7 +130,7 @@ spec:
containerName: "{{ daemonset_container_name }}"
resource: limits.memory
- name: "FILE_BUFFER_LIMIT"
- value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256i') }}"
+ value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}"
{% if openshift_logging_mux_client_mode is defined and
((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
(openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index 1e6c501bf..db6f23126 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -63,4 +63,4 @@ openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce']
openshift_logging_mux_file_buffer_storage_group: '65534'
openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux"
-openshift_logging_mux_file_buffer_limit: 256Mi
+openshift_logging_mux_file_buffer_limit: 2Gi
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index db9a780b7..af22a1a03 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -41,7 +41,7 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
+ until: node_schedulable is succeeded
when: "'nodename' in openshift.node"
delegate_to: "{{ openshift_master_host }}"
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 38b2fd8b8..efd119299 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -54,6 +54,48 @@ ha_svc_template_path: "native-cluster"
openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
+openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
+
+scheduler_config:
+ kind: Policy
+ apiVersion: v1
+ predicates: "{{ openshift_master_scheduler_predicates
+ | default(openshift_master_scheduler_current_predicates
+ | default(openshift_master_scheduler_default_predicates)) }}"
+ priorities: "{{ openshift_master_scheduler_priorities
+ | default(openshift_master_scheduler_current_priorities
+ | default(openshift_master_scheduler_default_priorities)) }}"
+
+openshift_master_valid_grant_methods:
+- auto
+- prompt
+- deny
+
+openshift_master_is_scaleup_host: False
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
+
+
# NOTE
# r_openshift_master_*_default may be defined external to this role.
# openshift_use_*, if defined, may affect other roles or play behavior.
diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml
index a16cbe78e..6166062ed 100644
--- a/roles/openshift_master/tasks/journald.yml
+++ b/roles/openshift_master/tasks/journald.yml
@@ -26,4 +26,4 @@
delay: 5
register: result
until: result.rc == 0
- when: journald_update | changed
+ when: journald_update is changed
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 9be5508aa..7bfc870d5 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -21,7 +21,7 @@
when:
- not openshift.common.is_containerized | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Create r_openshift_master_data_dir
file:
@@ -72,7 +72,7 @@
- not openshift.common.is_atomic | bool
with_items: "{{ openshift.master.identity_providers }}"
register: result
- until: result | success
+ until: result is succeeded
- name: Ensure htpasswd directory exists
file:
@@ -147,7 +147,7 @@
register: l_already_set
- set_fact:
- openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+ openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
- name: Set fact of all etcd host IPs
openshift_facts:
@@ -209,17 +209,17 @@
when:
- inventory_hostname == openshift_master_hosts[0]
register: l_start_result
- until: not l_start_result | failed
+ until: not (l_start_result is failed)
retries: 1
delay: 60
- name: Dump logs from master-api if it failed
command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- - l_start_result | failed
+ - l_start_result is failed
- set_fact:
- master_api_service_status_changed: "{{ l_start_result | changed }}"
+ master_api_service_status_changed: "{{ l_start_result is changed }}"
when:
- inventory_hostname == openshift_master_hosts[0]
@@ -236,17 +236,17 @@
when:
- inventory_hostname != openshift_master_hosts[0]
register: l_start_result
- until: not l_start_result | failed
+ until: not (l_start_result is failed)
retries: 1
delay: 60
- name: Dump logs from master-api if it failed
command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- - l_start_result | failed
+ - l_start_result is failed
- set_fact:
- master_api_service_status_changed: "{{ l_start_result | changed }}"
+ master_api_service_status_changed: "{{ l_start_result is changed }}"
when:
- inventory_hostname != openshift_master_hosts[0]
@@ -262,18 +262,18 @@
enabled: yes
state: started
register: l_start_result
- until: not l_start_result | failed
+ until: not (l_start_result is failed)
retries: 1
delay: 60
- name: Dump logs from master-controllers if it failed
command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers
when:
- - l_start_result | failed
+ - l_start_result is failed
- name: Set fact master_controllers_service_status_changed
set_fact:
- master_controllers_service_status_changed: "{{ l_start_result | changed }}"
+ master_controllers_service_status_changed: "{{ l_start_result is changed }}"
- name: node bootstrap settings
include_tasks: bootstrap.yml
diff --git a/roles/openshift_master/tasks/set_loopback_context.yml b/roles/openshift_master/tasks/set_loopback_context.yml
index 308b2f4cd..487fefb63 100644
--- a/roles/openshift_master/tasks/set_loopback_context.yml
+++ b/roles/openshift_master/tasks/set_loopback_context.yml
@@ -23,12 +23,12 @@
{{ openshift.master.loopback_context_name }}
--config={{ openshift_master_loopback_config }}
when:
- - set_loopback_cluster | changed
+ - set_loopback_cluster is changed
register: l_set_loopback_context
- command: >
{{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
--config={{ openshift_master_loopback_config }}
when:
- - l_set_loopback_context | changed
+ - l_set_loopback_context is changed
register: set_current_context
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 76b6f46aa..1c9ecafaa 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -50,7 +50,7 @@
- command: systemctl daemon-reload
when:
- - l_create_ha_unit_files | changed
+ - l_create_ha_unit_files is changed
# end workaround for missing systemd unit files
- name: enable master services
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index f50b91ff5..f72710832 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -19,4 +19,4 @@
- "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
- "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
register: result
- until: result | success
+ until: result is succeeded
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
deleted file mode 100644
index 0c681c764..000000000
--- a/roles/openshift_master/vars/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
-loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
-openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
-openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
-
-scheduler_config:
- kind: Policy
- apiVersion: v1
- predicates: "{{ openshift_master_scheduler_predicates
- | default(openshift_master_scheduler_current_predicates
- | default(openshift_master_scheduler_default_predicates)) }}"
- priorities: "{{ openshift_master_scheduler_priorities
- | default(openshift_master_scheduler_current_priorities
- | default(openshift_master_scheduler_default_priorities)) }}"
-
-openshift_master_valid_grant_methods:
-- auto
-- prompt
-- deny
-
-openshift_master_is_scaleup_host: False
-
-# These defaults assume forcing journald persistence, fsync to disk once
-# a second, rate-limiting to 10,000 logs a second, no forwarding to
-# syslog or wall, using 8GB of disk space maximum, using 10MB journal
-# files, keeping only a days worth of logs per journal file, and
-# retaining journal files no longer than a month.
-journald_vars_to_replace:
-- { var: Storage, val: persistent }
-- { var: Compress, val: yes }
-- { var: SyncIntervalSec, val: 1s }
-- { var: RateLimitInterval, val: 1s }
-- { var: RateLimitBurst, val: 10000 }
-- { var: SystemMaxUse, val: 8G }
-- { var: SystemKeepFree, val: 20% }
-- { var: SystemMaxFileSize, val: 10M }
-- { var: MaxRetentionSec, val: 1month }
-- { var: MaxFileSec, val: 1day }
-- { var: ForwardToSyslog, val: no }
-- { var: ForwardToWall, val: no }
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
index b63f5ca8c..a4ffa1890 100644
--- a/roles/openshift_metrics/tasks/install_hawkular.yaml
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -23,15 +23,15 @@
- block:
- set_fact: hawkular_key={{ lookup('file', openshift_metrics_hawkular_key) }}
- when: openshift_metrics_hawkular_key | exists
+ when: openshift_metrics_hawkular_key is exists
changed_when: false
- set_fact: hawkular_cert={{ lookup('file', openshift_metrics_hawkular_cert) }}
- when: openshift_metrics_hawkular_cert | exists
+ when: openshift_metrics_hawkular_cert is exists
changed_when: false
- set_fact: hawkular_ca={{ lookup('file', openshift_metrics_hawkular_ca) }}
- when: openshift_metrics_hawkular_ca | exists
+ when: openshift_metrics_hawkular_ca is exists
changed_when: false
- name: generate the hawkular-metrics route
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
index b0b888d56..5fcdbf76e 100644
--- a/roles/openshift_nfs/tasks/create_export.yml
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -31,4 +31,4 @@
- name: Re-export NFS filesystems
command: exportfs -ar
when:
- - created_export | changed
+ - created_export is changed
diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml
index 1aa7e7079..bd8fb44a2 100644
--- a/roles/openshift_nfs/tasks/setup.yml
+++ b/roles/openshift_nfs/tasks/setup.yml
@@ -5,7 +5,7 @@
- name: Install nfs-utils
package: name=nfs-utils state=present
register: result
- until: result | success
+ until: result is succeeded
- name: Configure NFS
lineinfile:
@@ -16,7 +16,7 @@
- name: Restart nfs-config
systemd: name=nfs-config state=restarted
- when: nfs_config | changed
+ when: nfs_config is changed
- name: Ensure exports directory exists
file:
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 1d9797f84..62e0e1341 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -24,7 +24,7 @@
- openshift_node_use_openshift_sdn | bool
- not openshift_node_bootstrap
register: l_openshift_node_stop_openvswitch_result
- until: not l_openshift_node_stop_openvswitch_result | failed
+ until: not (l_openshift_node_stop_openvswitch_result is failed)
retries: 3
delay: 30
notify:
@@ -41,7 +41,7 @@
name: "{{ openshift_service_type }}-node"
state: restarted
register: l_openshift_node_restart_node_result
- until: not l_openshift_node_restart_node_result | failed
+ until: not (l_openshift_node_restart_node_result is failed)
retries: 3
delay: 30
when:
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index a042bc01b..1a6f209e0 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -5,7 +5,7 @@
state: present
with_items: "{{ r_openshift_node_image_prep_packages }}"
register: result
- until: result | success
+ until: result is succeeded
- name: create the directory for node
file:
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 33c96d81a..a96785d5c 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -16,12 +16,12 @@
- openshift.common.is_containerized | bool
- openshift_node_use_openshift_sdn | default(true) | bool
register: ovs_start_result
- until: not ovs_start_result | failed
+ until: not (ovs_start_result is failed)
retries: 3
delay: 30
- set_fact:
- ovs_service_status_changed: "{{ ovs_start_result | changed }}"
+ ovs_service_status_changed: "{{ ovs_start_result is changed }}"
- file:
dest: "{{ l2_openshift_node_kubelet_args['config'] }}"
@@ -93,19 +93,19 @@
state: started
daemon_reload: yes
register: node_start_result
- until: not node_start_result | failed
+ until: not (node_start_result is failed)
retries: 1
delay: 30
ignore_errors: true
- name: Dump logs from node service if it failed
command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-node
- when: node_start_result | failed
+ when: node_start_result is failed
- name: Abort if node failed to start
fail:
msg: Node failed to start please inspect the logs and try again
- when: node_start_result | failed
+ when: node_start_result is failed
- set_fact:
- node_service_status_changed: "{{ node_start_result | changed }}"
+ node_service_status_changed: "{{ node_start_result is changed }}"
diff --git a/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml
index 541c8115a..5d2c67b86 100644
--- a/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml
+++ b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml
@@ -8,6 +8,6 @@
state: present
notify: restart NetworkManager
register: result
- until: result | success
+ until: result is succeeded
- include_tasks: network-manager.yml
diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml
index 9f66bf12d..0c8857b11 100644
--- a/roles/openshift_node/tasks/dnsmasq_install.yml
+++ b/roles/openshift_node/tasks/dnsmasq_install.yml
@@ -14,7 +14,7 @@
package: name=dnsmasq state=installed
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
- name: ensure origin/node directory exists
file:
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index c1f83d88b..9f004e8dd 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -6,7 +6,7 @@
name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
register: result
- until: result | success
+ until: result is succeeded
- name: Install sdn-ovs package
package:
@@ -15,14 +15,14 @@
when:
- openshift_node_use_openshift_sdn | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Install conntrack-tools package
package:
name: "conntrack-tools"
state: present
register: result
- until: result | success
+ until: result is succeeded
- when:
- openshift.common.is_containerized | bool
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 5ec364932..2daa6c75f 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -52,7 +52,9 @@
state: restarted
when: openshift_use_crio
register: task_result
- failed_when: task_result|failed and 'could not find the requested service' not in task_result.msg|lower
+ failed_when:
+ - task_result is failed
+ - ('could not find the requested service' not in task_result.msg|lower)
- name: restart NetworkManager to ensure resolv.conf is present
systemd:
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index 72a3b837f..52d80357e 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -3,4 +3,4 @@
package: name=ceph-common state=present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index 08ea71a0c..e60f57ae7 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -3,7 +3,7 @@
package: name=glusterfs-fuse state=present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Check for existence of fusefs sebooleans
command: getsebool {{ item }}
@@ -31,7 +31,7 @@
# since getsebool prints the resolved name. (At some point Ansible's seboolean module
# should learn to deal with aliases)
- item.item in item.stdout # Boolean does not have an alias.
- - ansible_python_version | version_compare('3', '<')
+ - ansible_python_version is version_compare('3', '<')
with_items: "{{ fusefs_getsebool_status.results }}"
# Workaround for https://github.com/openshift/openshift-ansible/issues/4438
@@ -52,5 +52,5 @@
# should learn to deal with aliases)
- item.item in item.stdout # Boolean does not have an alias.
- ('--> off' in item.stdout) # Boolean is currently off.
- - ansible_python_version | version_compare('3', '>=')
+ - ansible_python_version is version_compare('3', '>=')
with_items: "{{ fusefs_getsebool_status.results }}"
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index ece68dc71..d3a3668d5 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -3,4 +3,4 @@
package: name=iscsi-initiator-utils state=present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 5eacf42e8..1484aa076 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -3,7 +3,7 @@
package: name=nfs-utils state=present
when: not openshift.common.is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Check for existence of nfs sebooleans
command: getsebool {{ item }}
@@ -31,7 +31,7 @@
# since getsebool prints the resolved name. (At some point Ansible's seboolean module
# should learn to deal with aliases)
- item.item in item.stdout # Boolean does not have an alias.
- - ansible_python_version | version_compare('3', '<')
+ - ansible_python_version is version_compare('3', '<')
with_items: "{{ nfs_getsebool_status.results }}"
# Workaround for https://github.com/openshift/openshift-ansible/issues/4438
@@ -52,5 +52,5 @@
# should learn to deal with aliases)
- item.item in item.stdout # Boolean does not have an alias.
- ('--> off' in item.stdout) # Boolean is currently off.
- - ansible_python_version | version_compare('3', '>=')
+ - ansible_python_version is version_compare('3', '>=')
with_items: "{{ nfs_getsebool_status.results }}"
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 87556533a..f0a013e45 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -17,7 +17,7 @@
- name: download docker upgrade rpm
command: "{{ ansible_pkg_mgr }} install -C -y docker{{ '-' + docker_version }}"
register: result
- until: result | success
+ until: result is succeeded
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml
index e22018e6d..439700df6 100644
--- a/roles/openshift_node/tasks/upgrade/config_changes.yml
+++ b/roles/openshift_node/tasks/upgrade/config_changes.yml
@@ -74,4 +74,4 @@
# require a service to be part of the call.
- name: Reload systemd units
command: systemctl daemon-reload
- when: l_node_unit | changed
+ when: l_node_unit is changed
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 717cfa712..45b0be0a0 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -27,7 +27,7 @@
name: "{{ openshift_docker_service_name }}"
state: started
register: docker_start_result
- until: not docker_start_result | failed
+ until: not (docker_start_result is failed)
retries: 3
delay: 30
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index d2864e6b8..cc9a8f2d9 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -9,7 +9,7 @@
- name: download new node packages
command: "{{ ansible_pkg_mgr }} install -y --downloadonly {{ openshift_node_upgrade_rpm_list | join(' ')}}"
register: result
- until: result | success
+ until: result is succeeded
vars:
openshift_node_upgrade_rpm_list:
- "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
@@ -21,4 +21,4 @@
- name: download openvswitch upgrade rpm
command: "{{ ansible_pkg_mgr }} update -y --downloadonly openvswitch"
register: result
- until: result | success
+ until: result is succeeded
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
index 6390be558..32eeb76c6 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
@@ -11,7 +11,7 @@
- name: download new node packages
command: "{{ ansible_pkg_mgr }} install -C -y {{ openshift_node_upgrade_rpm_list | join(' ')}}"
register: result
- until: result | success
+ until: result is succeeded
vars:
openshift_node_upgrade_rpm_list:
- "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml
index bbf1c5f25..2fff556e5 100644
--- a/roles/openshift_node/tasks/upgrade/stop_services.yml
+++ b/roles/openshift_node/tasks/upgrade/stop_services.yml
@@ -25,7 +25,7 @@
name: docker
state: stopped
register: l_openshift_node_upgrade_docker_stop_result
- until: not l_openshift_node_upgrade_docker_stop_result | failed
+ until: not (l_openshift_node_upgrade_docker_stop_result is failed)
retries: 3
delay: 30
when:
diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml
index 3346b7c65..7f591996c 100644
--- a/roles/openshift_node/tasks/upgrade_pre.yml
+++ b/roles/openshift_node/tasks/upgrade_pre.yml
@@ -10,7 +10,7 @@
- name: update package meta data to speed install later.
command: "{{ ansible_pkg_mgr }} makecache"
register: result
- until: result | success
+ until: result is succeeded
when: not openshift.common.is_containerized | bool
- name: Check Docker image count
@@ -32,7 +32,7 @@
- name: download docker upgrade rpm
command: "{{ ansible_pkg_mgr }} install -y --downloadonly docker{{ '-' + docker_version }}"
register: result
- until: result | success
+ until: result is succeeded
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 3bef1fe6b..b673733df 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -20,9 +20,9 @@ kubeletArguments: {{ l2_openshift_node_kubelet_args | default(None) | to_padded_
container-runtime:
- remote
container-runtime-endpoint:
- - /var/run/crio.sock
+ - /var/run/crio/crio.sock
image-service-endpoint:
- - /var/run/crio.sock
+ - /var/run/crio/crio.sock
node-labels:
- router=true
- registry=true
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 0686ac101..3531e30b8 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -22,6 +22,6 @@
state: restarted
when: not openshift_certificates_redeploy | default(false) | bool
register: l_docker_restart_docker_in_cert_result
- until: not l_docker_restart_docker_in_cert_result | failed
+ until: not (l_docker_restart_docker_in_cert_result is failed)
retries: 3
delay: 30
diff --git a/roles/openshift_openstack/tasks/container-storage-setup.yml b/roles/openshift_openstack/tasks/container-storage-setup.yml
index 82307b208..be73d18be 100644
--- a/roles/openshift_openstack/tasks/container-storage-setup.yml
+++ b/roles/openshift_openstack/tasks/container-storage-setup.yml
@@ -8,7 +8,7 @@
group: root
mode: 0644
when:
- - ansible_distribution_version | version_compare('7.4', '>=')
+ - ansible_distribution_version is version_compare('7.4', '>=')
- ansible_distribution == "RedHat"
- block:
@@ -20,7 +20,7 @@
group: root
mode: 0644
when:
- - ansible_distribution_version | version_compare('7.4', '<')
+ - ansible_distribution_version is version_compare('7.4', '<')
- ansible_distribution == "RedHat"
- block:
diff --git a/roles/openshift_openstack/tasks/node-packages.yml b/roles/openshift_openstack/tasks/node-packages.yml
index e41104af1..c95c9e607 100644
--- a/roles/openshift_openstack/tasks/node-packages.yml
+++ b/roles/openshift_openstack/tasks/node-packages.yml
@@ -7,7 +7,7 @@
state: latest
with_items: "{{ openshift_openstack_required_packages }}"
register: result
- until: result | success
+ until: result is succeeded
- name: Install debug packages (optional)
yum:
@@ -16,4 +16,4 @@
with_items: "{{ openshift_openstack_debug_packages }}"
when: openshift_openstack_install_debug_packages|bool
register: result
- until: result | success
+ until: result is succeeded
diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml
index cf2ead5c3..858dd9e57 100644
--- a/roles/openshift_openstack/tasks/populate-dns.yml
+++ b/roles/openshift_openstack/tasks/populate-dns.yml
@@ -116,6 +116,6 @@
- "{{ openshift_openstack_dns_records_add | default([]) }}"
- entries
register: nsupdate_add_result
- until: nsupdate_add_result|succeeded
+ until: nsupdate_add_result is succeeded
retries: 10
delay: 1
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 5e7bde1e1..35206049f 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -10,7 +10,7 @@
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
register: result
- until: result | success
+ until: result is succeeded
- name: Remove openshift_additional.repo file
file:
@@ -37,6 +37,13 @@
- when: r_openshift_repos_has_run is not defined
block:
+ - include_tasks: rhel_repos.yml
+ when:
+ - ansible_distribution == 'RedHat'
+ - deployment_type == 'openshift-enterprise'
+ - rhsub_user is defined
+ - rhsub_pass is defined
+
- include_tasks: centos_repos.yml
when:
- ansible_os_family == "RedHat"
diff --git a/roles/openshift_repos/tasks/rhel_repos.yml b/roles/openshift_repos/tasks/rhel_repos.yml
new file mode 100644
index 000000000..c384cbe9a
--- /dev/null
+++ b/roles/openshift_repos/tasks/rhel_repos.yml
@@ -0,0 +1,34 @@
+---
+- name: Ensure RHEL rhui repositories are disabled
+ command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'rhui'"
+ register: repo_rhui
+ changed_when: "repo_rhui.rc != 1"
+ failed_when: repo_rhui.rc == 11
+
+- name: Disable RHEL rhui repositories
+ command: bash -c "yum-config-manager \
+ --disable 'rhui-REGION-client-config-server-7' \
+ --disable 'rhui-REGION-rhel-server-rh-common' \
+ --disable 'rhui-REGION-rhel-server-releases' \
+ --disable 'rhui-REGION-client-config-server-7'"
+ when: repo_rhui.changed
+
+- name: Ensure RHEL repositories are enabled
+ command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'Red Hat' | wc -l"
+ register: repo_rhel
+ changed_when: "'4' not in repo_rhel.stdout"
+ failed_when: repo_rhel.rc == 11
+
+- name: Disable all repositories
+ command: bash -c "subscription-manager repos --disable='*'"
+ when: repo_rhel.changed
+
+- name: Enable RHEL repositories
+ command: subscription-manager repos \
+ --enable="rhel-7-server-rpms" \
+ --enable="rhel-7-server-extras-rpms" \
+ --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \
+ --enable="rhel-7-fast-datapath-rpms"
+ register: subscribe_repos
+ until: subscribe_repos is succeeded
+ when: repo_rhel.changed
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 77428272c..651d896cf 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -47,7 +47,7 @@
- name: Abort when openshift_release is invalid
when:
- openshift_release is defined
- - not openshift_release | match('^\d+(\.\d+){1,3}$')
+ - not (openshift_release is match('^\d+(\.\d+){1,3}$'))
fail:
msg: |-
openshift_release is "{{ openshift_release }}" which is not a valid version string.
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 315bc5614..d11023a39 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -5,7 +5,7 @@
- not openshift.common.is_atomic | bool
- not glusterfs_heketi_is_native | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Verify heketi-cli is installed
shell: "command -v {{ glusterfs_heketi_cli }} >/dev/null 2>&1 || { echo >&2 'ERROR: Make sure heketi-cli is available, then re-run the installer'; exit 1; }"
diff --git a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
index 030fa81c9..3bdfa183f 100644
--- a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
+++ b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
@@ -9,4 +9,4 @@
systemd:
name: systemd-modules-load.service
state: restarted
- when: km | changed
+ when: km is changed
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 55e4024ec..5c043bc14 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -5,7 +5,7 @@
- name: Install nfs-utils
package: name=nfs-utils state=present
register: result
- until: result | success
+ until: result is succeeded
- name: Configure NFS
lineinfile:
@@ -16,7 +16,7 @@
- name: Restart nfs-config
systemd: name=nfs-config state=restarted
- when: nfs_config | changed
+ when: nfs_config is changed
- name: Ensure exports directory exists
file:
@@ -70,4 +70,4 @@
register: start_result
- set_fact:
- nfs_service_status_changed: "{{ start_result | changed }}"
+ nfs_service_status_changed: "{{ start_result is changed }}"
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index bee786a90..94dc63bd2 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -3,7 +3,7 @@
package: name=nfs-utils state=present
when: not openshift.common.is_containerized | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Start rpcbind
systemd:
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index ae0f68a5b..e50d5371e 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -49,7 +49,7 @@
when: openshift.common.deployment_type == 'origin'
assert:
that:
- - "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
+ - "{{ openshift_image_tag is match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
msg: |-
openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
You specified openshift_image_tag={{ openshift_image_tag }}
@@ -66,7 +66,7 @@
when: openshift.common.deployment_type == 'openshift-enterprise'
assert:
that:
- - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"
+ - "{{ openshift_image_tag is match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"
msg: |-
openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
diff --git a/roles/os_firewall/tasks/firewalld.yml b/roles/os_firewall/tasks/firewalld.yml
index 1e27ebaf9..4eae31596 100644
--- a/roles/os_firewall/tasks/firewalld.yml
+++ b/roles/os_firewall/tasks/firewalld.yml
@@ -9,7 +9,7 @@
name: firewalld
state: present
register: result
- until: result | success
+ until: result is succeeded
- name: Ensure iptables services are not enabled
systemd:
@@ -21,12 +21,14 @@
- iptables
- ip6tables
register: task_result
- failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ failed_when:
+ - task_result is failed
+ - ('could not' not in task_result.msg|lower)
- name: Wait 10 seconds after disabling iptables
pause:
seconds: 10
- when: task_result | changed
+ when: task_result is changed
- name: Start and enable firewalld service
systemd:
@@ -40,13 +42,13 @@
- name: need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail
pause:
seconds: 10
- when: result | changed
+ when: result is changed
- name: Restart polkitd
systemd:
name: polkit
state: restarted
- when: result | changed
+ when: result is changed
# Fix suspected race between firewalld and polkit BZ1436964
- name: Wait for polkit action to have been created
diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml
index a7c13e487..49d658d37 100644
--- a/roles/os_firewall/tasks/iptables.yml
+++ b/roles/os_firewall/tasks/iptables.yml
@@ -7,12 +7,14 @@
enabled: no
masked: yes
register: task_result
- failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ failed_when:
+ - task_result is failed
+ - ('could not' not in task_result.msg|lower)
- name: Wait 10 seconds after disabling firewalld
pause:
seconds: 10
- when: task_result | changed
+ when: task_result is changed
- name: Install iptables packages
package:
@@ -23,7 +25,7 @@
- iptables-services
when: not r_os_firewall_is_atomic | bool
register: result
- until: result | success
+ until: result is succeeded
- name: Start and enable iptables service
systemd:
@@ -40,4 +42,4 @@
- name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
pause:
seconds: 10
- when: result | changed
+ when: result is changed
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
index 60d665587..9d8ec7887 100644
--- a/roles/os_update_latest/tasks/main.yml
+++ b/roles/os_update_latest/tasks/main.yml
@@ -2,4 +2,4 @@
- name: Update all packages
package: name=* state=latest
register: result
- until: result | success
+ until: result is succeeded
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
deleted file mode 100644
index 8acdfb969..000000000
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when:
- - openshift_release is defined
- - openshift_release[0] == 'v'
-
-- name: Disable all repositories
- command: subscription-manager repos --disable="*"
-
-- name: Enable RHEL repositories
- command: subscription-manager repos \
- --enable="rhel-7-server-rpms" \
- --enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \
- --enable="rhel-7-fast-datapath-rpms"
- register: subscribe_repos
- until: subscribe_repos | succeeded
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 3466b7e44..e7eb6c572 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -1,68 +1,48 @@
---
-# TODO: Enhance redhat_subscription module
-# to make it able to attach to a pool
-# to make it able to enable repositories
-
- fail:
msg: "This role is only supported for Red Hat hosts"
when: ansible_distribution != 'RedHat'
-- fail:
- msg: The rhsub_user variable is required for this role.
- when: rhsub_user is not defined or not rhsub_user
-
-- fail:
- msg: The rhsub_pass variable is required for this role.
- when: rhsub_pass is not defined or not rhsub_pass
-
-- name: Detecting Atomic Host Operating System
- stat:
- path: /run/ostree-booted
- register: ostree_booted
-
-- name: Satellite preparation
- command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
- args:
- creates: /etc/rhsm/ca/katello-server-ca.pem
- when: rhsub_server is defined and rhsub_server
-
- name: Install Red Hat Subscription manager
yum:
name: subscription-manager
state: present
register: result
- until: result | success
+ until: result is succeeded
+
+- name: Is host already registered?
+ command: "subscription-manager version"
+ register: rh_subscribed
+ changed_when: False
-- name: RedHat subscriptions
+- name: Register host
redhat_subscription:
username: "{{ rhsub_user }}"
password: "{{ rhsub_pass }}"
register: rh_subscription
- until: rh_subscription | succeeded
+ until: rh_subscription is succeeded
+ when:
+ - "'not registered' in rh_subscribed.stdout"
-- name: Retrieve the OpenShift Pool ID
- command: subscription-manager list --available --matches="{{ rhsub_pool }}" --pool-only
- register: openshift_pool_id
- until: openshift_pool_id | succeeded
- changed_when: False
+- fail:
+ msg: 'Unable to register host with Red Hat Subscription Manager'
+ when:
+ - "'not registered' in rh_subscribed.stdout"
+ - rh_subscription.failed
- name: Determine if OpenShift Pool Already Attached
- command: subscription-manager list --consumed --matches="{{ rhsub_pool }}" --pool-only
+ command: "subscription-manager list --consumed --pool-only --matches '*OpenShift*'"
register: openshift_pool_attached
- until: openshift_pool_attached | succeeded
changed_when: False
- when: openshift_pool_id.stdout == ''
-
-- fail:
- msg: "Unable to find pool matching {{ rhsub_pool }} in available or consumed pools"
- when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
+ ignore_errors: yes
- name: Attach to OpenShift Pool
- command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }}
- register: subscribe_pool
- until: subscribe_pool | succeeded
- when: openshift_pool_id.stdout != ''
+ command: "subscription-manager attach --pool {{ rhsub_pool }}"
+ register: openshift_pool_attached
+ changed_when: "'Successfully attached a subscription' in openshift_pool_attached.stdout"
+ when: rhsub_pool not in openshift_pool_attached.stdout
-- include_tasks: enterprise.yml
+- include_tasks: satellite.yml
when:
- - not ostree_booted.stat.exists | bool
+ - rhsub_server is defined
+ - rhsub_server
diff --git a/roles/rhel_subscribe/tasks/satellite.yml b/roles/rhel_subscribe/tasks/satellite.yml
new file mode 100644
index 000000000..dadbe3487
--- /dev/null
+++ b/roles/rhel_subscribe/tasks/satellite.yml
@@ -0,0 +1,5 @@
+---
+- name: Satellite preparation
+ command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ args:
+ creates: /etc/rhsm/ca/katello-server-ca.pem
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
index 421b4ecf9..c32872d24 100644
--- a/roles/template_service_broker/defaults/main.yml
+++ b/roles/template_service_broker/defaults/main.yml
@@ -3,3 +3,4 @@
template_service_broker_remove: False
template_service_broker_install: True
openshift_template_service_broker_namespaces: ['openshift']
+template_service_broker_selector: { "region": "infra" }
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 99a58baff..1253c1133 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -15,6 +15,8 @@
- oc_project:
name: openshift-template-service-broker
state: present
+ node_selector:
+ - ""
- command: mktemp -d /tmp/tsb-ansible-XXXXXX
register: mktemp
@@ -45,6 +47,7 @@
{{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
--param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
+ --param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }}
| {{ openshift.common.client_binary }} apply -f -
# reconcile with rbac