-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml                   4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/version_override.yml        29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml  75
-rw-r--r--  playbooks/openshift-master/private/validate_restart.yml                      6
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml                           2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml                       21
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml              7
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml              9
-rw-r--r--  roles/openshift_version/defaults/main.yml                                    4
-rw-r--r--  roles/openshift_version/tasks/first_master.yml                              26
-rw-r--r--  roles/openshift_version/tasks/first_master_containerized_version.yml         6
-rw-r--r--  roles/openshift_version/tasks/first_master_rpm_version.yml                   9
-rw-r--r--  roles/openshift_version/tasks/masters_and_nodes.yml                          5
13 files changed, 132 insertions(+), 71 deletions(-)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index c027fc8f5..7bf1496cb 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -51,6 +51,10 @@
# l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
# l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
+# version_override will set various version-related variables during a double upgrade.
+- import_playbook: version_override.yml
+ when: l_double_upgrade_cp | default(False)
+
- import_playbook: verify_cluster.yml
# If we're only upgrading nodes, we need to ensure masters are already upgraded
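
The new import is guarded with `when: l_double_upgrade_cp | default(False)`, so ordinary single-version upgrades skip the override plays entirely. A minimal runnable sketch of that guard pattern (play and message are hypothetical; run as-is the task is skipped, add `-e l_double_upgrade_cp=True` to trigger it):

---
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Runs only during a double upgrade
      debug:
        msg: "overriding version variables for the intermediate hop"
      when: l_double_upgrade_cp | default(False)
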
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/version_override.yml b/playbooks/common/openshift-cluster/upgrades/pre/version_override.yml
new file mode 100644
index 000000000..b2954397f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/version_override.yml
@@ -0,0 +1,29 @@
+---
+# This playbook overrides normal version setting during double upgrades.
+
+- name: Set proper version values for upgrade
+ hosts: "{{ l_version_override_hosts | default('all:!all') }}"
+ tasks:
+ - set_fact:
+ # All of these will either have been set by openshift_version or
+ # provided by the user; we need to save these for later.
+ l_double_upgrade_saved_version: "{{ openshift_version }}"
+ l_double_upgrade_saved_release: "{{ openshift_release | default(openshift_upgrade_target) }}"
+ l_double_upgrade_saved_tag: "{{ openshift_image_tag }}"
+ l_double_upgrade_saved_pkgv: "{{ openshift_pkg_version }}"
+ - set_fact:
+ # We already ran openshift_version for the second of two upgrades;
+ # here we need to set some variables to enable the first upgrade.
+ # openshift_version, openshift_image_tag, and openshift_pkg_version
+ # will be modified by openshift_version; we initially set them to the
+ # first-upgrade versions (e.g., 3.8) so that the second-upgrade
+ # versions (e.g., 3.9) are not used by accident.
+ l_double_upgrade_cp_reset_version: True
+ openshift_version: "{{ l_double_upgrade_first_version }}"
+ openshift_release: "{{ l_double_upgrade_first_release }}"
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
+
+# Now that we have force-set a different version, we need to update a few things
+# to ensure we have settings that actually match what's in repos/registries.
+- import_playbook: ../../../../init/version.yml
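
The playbook above stashes the user-requested values in `l_double_upgrade_saved_*` facts and force-sets the first-hop values; the saved values are restored later in upgrade_control_plane.yml. A minimal sketch of the save/override/restore round trip (version numbers are illustrative):

---
- hosts: localhost
  gather_facts: false
  vars:
    openshift_version: "3.9.14"       # what the user actually asked for
  tasks:
    - name: Save the requested version before forcing the intermediate one
      set_fact:
        l_saved_version: "{{ openshift_version }}"
    - name: Force the first-upgrade version
      set_fact:
        openshift_version: "3.8.44"
    - debug:
        msg: "first hop runs with {{ openshift_version }}"    # 3.8.44
    - name: Restore the requested version for the second upgrade
      set_fact:
        openshift_version: "{{ l_saved_version }}"
    - debug:
        msg: "second hop runs with {{ openshift_version }}"   # 3.9.14
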
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 9c7677f1b..c21862dea 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -17,32 +17,32 @@
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_base_packages_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plan
-## If they've specified pkg_version or image_tag preserve that for later use
-- name: Configure the upgrade target for the common upgrade tasks 3.8
+- name: Configure the initial upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
- openshift_upgrade_target: '3.8'
+ # We use 3.9 here so that running openshift_version yields the
+ # correct values for 3.9; the 3.8 values are hard-coded in
+ # ../pre/version_override.yml when necessary.
+ openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
- openshift_release: '3.8'
- _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
- openshift_pkg_version: ''
- _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+
+## Check to see if we need to double upgrade (3.7 -> 3.8 -> 3.9)
+- name: Configure variables for double upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
l_double_upgrade_cp: True
+ l_version_override_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_double_upgrade_first_version: "3.8"
+ l_double_upgrade_first_release: "3.8"
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
- - name: set l_force_image_tag_to_version = True
- set_fact:
- # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
- # to match 3.8 version
- l_force_image_tag_to_version: True
- when: _requested_image_tag is defined
-
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
# them by default.
vars:
+ l_version_override_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
@@ -52,46 +52,48 @@
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
- when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ when: l_double_upgrade_cp | default(False)
- name: Flag pre-upgrade checks complete for hosts without errors 3.8
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- set_fact:
pre_upgrade_complete: True
- when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ when: l_double_upgrade_cp | default(False)
# Pre-upgrade completed
- name: Intermediate 3.8 Upgrade
import_playbook: ../upgrade_control_plane.yml
- when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ when: l_double_upgrade_cp | default(False)
+
+- name: Restore 3.9 version variables
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ # The host pattern all:!all matches 0 hosts
+ l_version_override_hosts: "all:!all"
+ openshift_version: "{{ l_double_upgrade_saved_version }}"
+ openshift_release: "{{ l_double_upgrade_saved_release }}"
+ openshift_image_tag: "{{ l_double_upgrade_saved_tag }}"
+ openshift_pkg_version: "{{ l_double_upgrade_saved_pkgv }}"
+ when: l_double_upgrade_cp | default(False)
## 3.8 upgrade complete; we should now be able to upgrade to 3.9
+- name: Clear some values now that we're done with double upgrades.
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ l_double_upgrade_cp: False
+ l_double_upgrade_cp_reset_version: False
-- name: Configure the upgrade target for the common upgrade tasks 3.9
+# We should be on 3.8 at this point; set upgrade_target to 3.9
+- name: Configure the upgrade target for second upgrade
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- - meta: clear_facts
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.8'
- openshift_release: '3.9'
- openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
- # Set the user's specified image_tag for 3.9 upgrade if it was provided.
- - set_fact:
- openshift_image_tag: "{{ _requested_image_tag }}"
- l_force_image_tag_to_version: False
- when: _requested_image_tag is defined
- # If the user didn't specify an image_tag, we need to force update image_tag
- # because it will have already been set during 3.8. If we aren't running
- # a double upgrade, then we can preserve image_tag because it will still
- # be the user provided value.
- - set_fact:
- l_force_image_tag_to_version: True
- when:
- - l_double_upgrade_cp is defined and l_double_upgrade_cp
- - _requested_image_tag is not defined
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
@@ -106,7 +108,6 @@
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
- openshift_version_reinit: True
- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
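
Whether the double-upgrade machinery engages at all comes down to the `version_compare` test against the first master's currently installed version (the filter was renamed `version` in later Ansible releases). A runnable sketch of the same test with the installed version mocked as a plain variable:

---
- hosts: localhost
  gather_facts: false
  vars:
    openshift_currently_installed_version: "3.7.2"   # mocked; normally a discovered fact
  tasks:
    - name: Flag a double upgrade when starting below 3.8
      set_fact:
        l_double_upgrade_cp: True
      when: openshift_currently_installed_version | version_compare('3.8', '<')
    - debug:
        msg: "double upgrade needed: {{ l_double_upgrade_cp | default(False) }}"
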
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 60b0e5bb6..40aaa653c 100644
--- a/playbooks/openshift-master/private/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -33,6 +33,7 @@
- stat: path="{{ hostvars.localhost.mktemp.stdout }}"
register: exists
changed_when: false
+ when: "'stdout' in hostvars.localhost.mktemp"
- name: Cleanup temp file on localhost
hosts: localhost
@@ -41,6 +42,7 @@
tasks:
- file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
changed_when: false
+ when: "'stdout' in hostvars.localhost.mktemp"
- name: Warn if restarting the system where ansible is running
hosts: oo_masters_to_config
@@ -54,7 +56,9 @@
must be verified manually. To only restart services, set
openshift_master_rolling_restart_mode=services in host
inventory and relaunch the playbook.
- when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
+ when:
+ - "'stat' in exists"
+ - exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
- set_fact:
current_host: "{{ exists.stat.exists }}"
when: openshift.common.rolling_restart_mode == 'system'
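
The added `'stdout' in ...` and `'stat' in ...` guards protect against registered variables from tasks that were skipped (for example in check mode), which then lack those keys. A minimal sketch of the failure mode and the guard:

---
- hosts: localhost
  gather_facts: false
  tasks:
    - name: May be skipped, leaving the registered var without a stdout key
      command: mktemp
      register: mktemp
      when: false               # simulate a skipped run (e.g., check mode)

    - name: Reference stdout only when it actually exists
      debug:
        msg: "temp file is {{ mktemp.stdout }}"
      when: "'stdout' in mktemp"
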
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 9fabc5826..66dd2f5a3 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -131,6 +131,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
+ __logging_scale_up: True
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
loop_control:
@@ -221,6 +222,7 @@
openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}"
openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}"
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
+ __logging_ops_scale_up: True
with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
loop_control:
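
Both loops include the role once per Elasticsearch node that still needs to be created (desired cluster size minus existing deploymentconfigs), and the new `__logging_scale_up`/`__logging_ops_scale_up` flags ride along on exactly those scale-up iterations. A sketch of the count arithmetic with mocked inputs:

---
- hosts: localhost
  gather_facts: false
  vars:
    desired_cluster_size: 3
    existing_dcs: ['logging-es-abc']   # mocked existing deploymentconfigs
  tasks:
    - name: One iteration per ES node still to be created (here 3 - 1 = 2)
      debug:
        msg: "would include the role for new node {{ item }} with __logging_scale_up=True"
      with_sequence: count={{ desired_cluster_size | int - existing_dcs | count }}
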
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 64e5a3a1f..441460b2d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -138,15 +138,22 @@
- "prometheus_out.stderr | length > 0"
- "'already exists' not in prometheus_out.stderr"
-- set_fact:
- _logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
+- name: Checking for passwd.yml
+ stat: path="{{ generated_certs_dir }}/passwd.yml"
+ register: passwd_file
+ check_mode: no
-- template:
+- when: not passwd_file.stat.exists
+ template:
src: passwd.j2
- dest: "{{mktemp.stdout}}/passwd.yml"
+ dest: "{{ generated_certs_dir }}/passwd.yml"
vars:
logging_user_name: "{{ openshift_logging_elasticsearch_prometheus_sa }}"
- logging_user_passwd: "{{ _logging_metrics_proxy_passwd }}"
+ logging_user_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
+
+- slurp:
+ src: "{{ generated_certs_dir }}/passwd.yml"
+ register: _logging_metrics_proxy_passwd
# View role and binding
- name: Generate logging-elasticsearch-view-role
@@ -296,7 +303,7 @@
- name: admin.jks
path: "{{ generated_certs_dir }}/system.admin.jks"
- name: passwd.yml
- path: "{{mktemp.stdout}}/passwd.yml"
+ path: "{{ generated_certs_dir }}/passwd.yml"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -433,7 +440,7 @@
es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}"
deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
es_replicas: 1
- basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}"
+ basic_auth_passwd: "{{ ( _logging_metrics_proxy_passwd['content'] | b64decode | from_yaml )[openshift_logging_elasticsearch_prometheus_sa]['passwd'] }}"
es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}"
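
Persisting passwd.yml under `generated_certs_dir` means reruns reuse the same proxy password instead of regenerating it, and the password is re-read with `slurp`, which returns base64-encoded content. A sketch of the decode/extract chain, assuming passwd.yml maps a service-account name to a `passwd` entry as the `passwd.j2` template appears to render (path and SA name are hypothetical):

---
- hosts: localhost
  gather_facts: false
  vars:
    generated_certs_dir: /tmp/logging-certs   # hypothetical path
    prometheus_sa: prometheus-reader          # hypothetical SA name
  tasks:
    - name: Read the persisted password file (content comes back base64-encoded)
      slurp:
        src: "{{ generated_certs_dir }}/passwd.yml"
      register: _passwd

    # Assuming passwd.yml contains, e.g.:
    #   prometheus-reader:
    #     passwd: c2VjcmV0cGFzcw==
    - name: Decode the file, parse it as YAML, and pull out one password
      debug:
        msg: "{{ (_passwd['content'] | b64decode | from_yaml)[prometheus_sa]['passwd'] }}"
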
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
index 14f2313e1..01247dd5d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
@@ -65,6 +65,12 @@
{{ openshift_client_binary }} get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
register: _cluster_dcs
+ # If we are currently restarting the "es" cluster, check whether we are scaling up the number of es nodes;
+ # if we are currently restarting the "es-ops" cluster, check whether we are scaling up the number of ops nodes.
+ # If we've created a new node for that cluster, the appropriate variable will be true; otherwise we default to false.
+ - set_fact:
+ _skip_healthcheck: "{{ __logging_scale_up | default(false) if _cluster_component == 'es' else __logging_ops_scale_up | default(false) }}"
+
## restart all dcs for full restart
- name: "Restart ES node {{ _es_node }}"
include_tasks: restart_es_node.yml
@@ -94,6 +100,7 @@
{{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
register: _enable_output
changed_when: "'\"acknowledged\":true' in _enable_output.stdout"
+ when: _cluster_pods.stdout != ""
# Reenable external communication for {{ _cluster_component }}
- name: Reenable external communication for logging-{{ _cluster_component }}
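
The `_skip_healthcheck` fact selects between the two scale-up flags with an inline Jinja conditional, and the re-enable task is now guarded against an empty pod list. A sketch of the conditional with both inputs mocked:

---
- hosts: localhost
  gather_facts: false
  vars:
    _cluster_component: es       # the cluster currently being restarted
    __logging_scale_up: true     # set when a new "es" node was just created
  tasks:
    - name: Skip per-node health checks when this cluster is scaling up
      set_fact:
        _skip_healthcheck: "{{ __logging_scale_up | default(false)
                               if _cluster_component == 'es'
                               else __logging_ops_scale_up | default(false) }}"
    - debug:
        var: _skip_healthcheck   # true here
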
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
index a1e172168..934ab886b 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
@@ -3,7 +3,8 @@
command: >
{{ openshift_client_binary }} rollout latest {{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }}
-- name: "Waiting for {{ _es_node }} to finish scaling up"
+- when: not _skip_healthcheck | bool
+ name: "Waiting for {{ _es_node }} to finish scaling up"
oc_obj:
state: list
name: "{{ _es_node }}"
@@ -19,12 +20,14 @@
retries: 60
delay: 30
-- name: Gettings name(s) of replica pod(s)
+- when: not _skip_healthcheck | bool
+ name: Getting name(s) of replica pod(s)
command: >
{{ openshift_client_binary }} get pods -l deploymentconfig={{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
register: _pods
-- name: "Waiting for ES to be ready for {{ _es_node }}"
+- when: not _skip_healthcheck | bool
+ name: "Waiting for ES to be ready for {{ _es_node }}"
shell: >
{{ openshift_client_binary }} exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
with_items: "{{ _pods.stdout.split(' ') }}"
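
Note that the patch lists `when:` before `name:` on each task; key order within a task mapping is irrelevant to Ansible, so this is equivalent to the conventional `name:`-first layout. A minimal sketch showing both orderings skip identically:

---
- hosts: localhost
  gather_facts: false
  vars:
    _skip_healthcheck: true
  tasks:
    - when: not _skip_healthcheck | bool
      name: Health check (when listed first)
      debug:
        msg: checking
    - name: Health check (name listed first)
      debug:
        msg: checking
      when: not _skip_healthcheck | bool
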
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 513dff045..c807df9d3 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -10,4 +10,6 @@ openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_typ
openshift_use_crio_only: False
l_first_master_version_task_file: "{{ openshift_is_containerized | ternary('first_master_containerized_version.yml', 'first_master_rpm_version.yml') }}"
-l_force_image_tag_to_version: False
+
+# Used during double control plane upgrades.
+l_double_upgrade_cp_reset_version: False
diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml
index b0d155c2c..022ac30fc 100644
--- a/roles/openshift_version/tasks/first_master.yml
+++ b/roles/openshift_version/tasks/first_master.yml
@@ -13,20 +13,30 @@
- include_tasks: "{{ l_first_master_version_task_file }}"
+# When a double upgrade is in progress, we want to set everything to match
+# openshift_version.
- block:
- debug:
msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
- set_fact:
- openshift_pkg_version: -{{ openshift_version }}
+ openshift_pkg_version: "-{{ openshift_version }}"
when:
- - openshift_pkg_version is not defined or openshift_pkg_version == ""
- - openshift_upgrade_target is not defined
+ - openshift_pkg_version is not defined or l_double_upgrade_cp_reset_version
+# When a double upgrade is in progress, we want to set everything to match
+# openshift_version.
- block:
- debug:
- msg: "openshift_image_tag set to v{{ openshift_version }}"
+ msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
- set_fact:
- openshift_image_tag: v{{ openshift_version }}
- when: >
- openshift_image_tag is not defined or openshift_image_tag == ""
- or l_force_image_tag_to_version | bool
+ openshift_image_tag: "v{{ openshift_version }}"
+ when: openshift_image_tag is not defined or l_double_upgrade_cp_reset_version
+
+# The end result of these four variables is quite important, so make sure they are displayed and logged:
+- debug: var=openshift_release
+
+- debug: var=openshift_image_tag
+
+- debug: var=openshift_pkg_version
+
+- debug: var=openshift_version
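
The two fallbacks derive the rpm version string (leading `-`) and the image tag (leading `v`) from `openshift_version` whenever the user did not supply them or a double-upgrade reset is in effect. A sketch of the derivation:

---
- hosts: localhost
  gather_facts: false
  vars:
    openshift_version: "3.9.14"
  tasks:
    - name: Derive package version and image tag from openshift_version
      set_fact:
        openshift_pkg_version: "-{{ openshift_version }}"   # "-3.9.14"
        openshift_image_tag: "v{{ openshift_version }}"     # "v3.9.14"
    - debug: var=openshift_pkg_version
    - debug: var=openshift_image_tag
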
diff --git a/roles/openshift_version/tasks/first_master_containerized_version.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml
index 9eb38cb2b..e02a75eab 100644
--- a/roles/openshift_version/tasks/first_master_containerized_version.yml
+++ b/roles/openshift_version/tasks/first_master_containerized_version.yml
@@ -6,9 +6,7 @@
openshift_version: "{{ openshift_image_tag[1:].split('-')[0] if openshift_image_tag != 'latest' else openshift_image_tag }}"
when:
- openshift_image_tag is defined
- - openshift_image_tag != ""
- openshift_version is not defined
- - not (openshift_version_reinit | default(false))
- name: Set containerized version to configure if openshift_release specified
set_fact:
@@ -22,7 +20,7 @@
docker run --rm {{ openshift_cli_image }}:latest version
register: cli_image_version
when:
- - openshift_version is not defined or openshift_version_reinit | default(false)
+ - openshift_version is not defined
- not openshift_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
@@ -36,7 +34,7 @@
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
- when: openshift_version is not defined or openshift_version_reinit | default(false)
+ when: openshift_version is not defined
# If we got an openshift_version like "3.2", lookup the latest 3.2 container version
# and use that value instead.
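
Going the other way, `openshift_version` is recovered from an image tag by dropping the leading `v` and any `-release` suffix. A sketch of that string handling:

---
- hosts: localhost
  gather_facts: false
  vars:
    openshift_image_tag: "v3.9.14-1"
  tasks:
    - name: Strip the leading "v" and any trailing "-release" suffix
      set_fact:
        openshift_version: "{{ openshift_image_tag[1:].split('-')[0] }}"
    - debug: var=openshift_version   # "3.9.14"
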
diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml
index 85e440513..9a5f0c568 100644
--- a/roles/openshift_version/tasks/first_master_rpm_version.yml
+++ b/roles/openshift_version/tasks/first_master_rpm_version.yml
@@ -5,17 +5,14 @@
openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
when:
- openshift_pkg_version is defined
- - openshift_pkg_version != ""
- openshift_version is not defined
- - not (openshift_version_reinit | default(false))
# These tasks should only be run against masters and nodes
- name: Set openshift_version for rpm installation
include_tasks: check_available_rpms.yml
+# If a double upgrade is in progress, we want to set openshift_version to
+# whatever rpm package version is available.
- set_fact:
openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}"
- when: openshift_version is not defined or ( openshift_version_reinit | default(false) )
-- set_fact:
- openshift_pkg_version: "-{{ rpm_results.results.versions.available_versions.0 }}"
- when: openshift_version_reinit | default(false)
+ when: openshift_version is not defined or l_double_upgrade_cp_reset_version
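
When the version is still unset, or a double-upgrade reset forces it, the first entry of the available rpm versions wins. A sketch with the registered structure mocked:

---
- hosts: localhost
  gather_facts: false
  vars:
    rpm_results:                 # mocked shape of the check_available_rpms result
      results:
        versions:
          available_versions: ["3.8.44", "3.7.2"]
  tasks:
    - name: Fall back to the newest rpm the repos offer
      set_fact:
        openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}"
      when: openshift_version is not defined or l_double_upgrade_cp_reset_version | default(false)
    - debug: var=openshift_version   # "3.8.44"
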
diff --git a/roles/openshift_version/tasks/masters_and_nodes.yml b/roles/openshift_version/tasks/masters_and_nodes.yml
index eddd5ff42..c4dbc2a5f 100644
--- a/roles/openshift_version/tasks/masters_and_nodes.yml
+++ b/roles/openshift_version/tasks/masters_and_nodes.yml
@@ -8,10 +8,7 @@
fail:
msg: "OCP rpm version {{ rpm_results.results.versions.available_versions.0 }} is different from OCP image version {{ openshift_version }}"
# Both versions have the same string representation
- when:
- - openshift_version not in rpm_results.results.versions.available_versions.0
- - openshift_version_reinit | default(false)
-
+ when: rpm_results.results.versions.available_versions.0 != openshift_version
# block when
when: not openshift_is_atomic | bool
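
The check was also tightened from a substring test (`openshift_version not in available_versions.0`) to strict equality, so the rpm and image versions must match exactly rather than merely overlap. A sketch of the tightened check with both versions mocked:

---
- hosts: localhost
  gather_facts: false
  vars:
    openshift_version: "3.9.14"
    rpm_version: "3.9.14"   # stands in for rpm_results.results.versions.available_versions.0
  tasks:
    - name: Fail when the rpm and image versions diverge
      fail:
        msg: "OCP rpm version {{ rpm_version }} is different from OCP image version {{ openshift_version }}"
      when: rpm_version != openshift_version
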