From 2a830f458e6512c3b6305934a650ba23d234a8ac Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Thu, 1 Sep 2016 14:29:10 -0300
Subject: Support openshift_upgrade_dry_run=true for pre-upgrade checks only.

---
 playbooks/common/openshift-cluster/upgrades/pre.yml | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'playbooks')

diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
index 42a24eaf8..13f6471b5 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre.yml
@@ -309,3 +309,10 @@
   - fail:
       msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
     when: etcd_backup_failed | length > 0
+
+- name: Exit upgrade if dry-run specified
+  hosts: oo_first_master
+  tasks:
+  - fail:
+      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
+    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
-- cgit v1.2.1

From d5545fbc68f0f398c727065faf835faca68e1fc0 Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Thu, 1 Sep 2016 14:29:52 -0300
Subject: Stop guarding against pacemaker in upgrade, no longer necessary.

---
 playbooks/common/openshift-cluster/upgrades/pre.yml | 8 --------
 1 file changed, 8 deletions(-)

(limited to 'playbooks')

diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
index 13f6471b5..3a134c802 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre.yml
@@ -38,8 +38,6 @@
 ###############################################################################
 - name: Verify upgrade can proceed on first master
   hosts: oo_first_master
-  vars:
-    g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
   gather_facts: no
   tasks:
   - fail:
@@ -48,12 +46,6 @@
         deployment types
     when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
 
-  - fail:
-      msg: >
-        This upgrade does not support Pacemaker:
-        https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
-    when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
-
 # Error out in situations where the user has older versions specified in their
 # inventory in any of the openshift_release, openshift_image_tag, and
 # openshift_pkg_version variables. These must be removed or updated to proceed
-- cgit v1.2.1

From b3d66ad8db005d1e0f8de67eae6471b8f1cb7800 Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Thu, 1 Sep 2016 14:30:58 -0300
Subject: Drop atomic-enterprise as a valid deployment type in upgrade.
---
 playbooks/common/openshift-cluster/upgrades/pre.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'playbooks')

diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
index 3a134c802..6052481c2 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre.yml
@@ -42,9 +42,9 @@
   tasks:
   - fail:
       msg: >
-        This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
+        This upgrade is only supported for origin, openshift-enterprise, and online
         deployment types
-    when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
+    when: deployment_type not in ['origin','openshift-enterprise', 'online']
 
 # Error out in situations where the user has older versions specified in their
 # inventory in any of the openshift_release, openshift_image_tag, and
-- cgit v1.2.1

From 1ee66d50b7ca6d77f5bc3718cb58f86c622b2125 Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Fri, 2 Sep 2016 11:21:11 -0300
Subject: Reunite upgrade reconciliation gating with the play it gates on.

---
 .../common/openshift-cluster/upgrades/upgrade.yml | 35 +++++++++++-----------
 1 file changed, 18 insertions(+), 17 deletions(-)

(limited to 'playbooks')

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
index 8a2784fb4..c4ce5fef6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
@@ -177,6 +177,24 @@
   - set_fact:
       reconcile_complete: True
 
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      reconcile_completed: "{{ hostvars
+                               | oo_select_keys(groups.oo_masters_to_config)
+                               | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+  - set_fact:
+      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+    when: reconcile_failed | length > 0
+
 ###############################################################################
 # Upgrade Nodes
 ###############################################################################
@@ -242,20 +260,3 @@
     when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool
 
-##############################################################################
-# Gate on reconcile
-##############################################################################
-- name: Gate on reconcile
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      reconcile_completed: "{{ hostvars
-                               | oo_select_keys(groups.oo_masters_to_config)
-                               | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
-  - set_fact:
-      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
-    when: reconcile_failed | length > 0
-- cgit v1.2.1

From 0cb963375165f03807818a8e8fa99a52a8244c04 Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Mon, 12 Sep 2016 08:47:53 -0300
Subject: Split upgrade entry points into control plane/node.

---
 .../openshift-cluster/upgrades/v3_3/upgrade.yml    | 49 +---------------------
 .../upgrades/v3_3/upgrade_masters.yml              |  9 ++++
 .../upgrades/v3_3/upgrade_nodes.yml                |  9 ++++
 .../common/openshift-cluster/upgrades/init.yml     | 48 +++++++++++++++++++++
 .../common/openshift-cluster/upgrades/pre.yml      |  2 +-
 5 files changed, 68 insertions(+), 49 deletions(-)
 create mode 100644 playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
 create mode 100644 playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/init.yml

(limited to 'playbooks')

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index e740b12c0..3a1712000 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -1,52 +1,5 @@
 ---
-- include: ../../../../common/openshift-cluster/verify_ansible_version.yml
-
-- hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-  - add_host:
-      name: "{{ item }}"
-      groups: l_oo_all_hosts
-    with_items: g_all_hosts | default([])
-
-- hosts: l_oo_all_hosts
-  gather_facts: no
-  tasks:
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/evaluate_groups.yml
-  vars:
-    # Do not allow adding hosts during upgrade.
-    g_new_master_hosts: []
-    g_new_node_hosts: []
-    openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_deployment_type: "{{ deployment_type }}"
-
-- name: Set oo_options
-  hosts: oo_all_hosts
-  tasks:
-  - set_fact:
-      openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
-    when: openshift_docker_additional_registries is not defined
-  - set_fact:
-      openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
-    when: openshift_docker_insecure_registries is not defined
-  - set_fact:
-      openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
-    when: openshift_docker_blocked_registries is not defined
-  - set_fact:
-      openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
-    when: openshift_docker_options is not defined
-  - set_fact:
-      openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
-    when: openshift_docker_log_driver is not defined
-  - set_fact:
-      openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
-    when: openshift_docker_log_options is not defined
-
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
 
 # Configure the upgrade target for the common upgrade tasks:
 - hosts: l_oo_all_hosts
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
new file mode 100644
index 000000000..c7d7eb3c6
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
@@ -0,0 +1,9 @@
+---
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..c7d7eb3c6
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,9 @@
+---
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
new file mode 100644
index 000000000..f3bc70a72
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -0,0 +1,48 @@
+---
+- include: ../verify_ansible_version.yml
+
+- hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts | default([])
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../evaluate_groups.yml
+  vars:
+    # Do not allow adding hosts during upgrade.
+    g_new_master_hosts: []
+    g_new_node_hosts: []
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Set oo_options
+  hosts: oo_all_hosts
+  tasks:
+  - set_fact:
+      openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+    when: openshift_docker_additional_registries is not defined
+  - set_fact:
+      openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+    when: openshift_docker_insecure_registries is not defined
+  - set_fact:
+      openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+    when: openshift_docker_blocked_registries is not defined
+  - set_fact:
+      openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+    when: openshift_docker_options is not defined
+  - set_fact:
+      openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+    when: openshift_docker_log_driver is not defined
+  - set_fact:
+      openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+    when: openshift_docker_log_options is not defined
diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
index 6052481c2..2aada4370 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre.yml
@@ -73,7 +73,7 @@
         valid release for a {{ openshift_upgrade_target }} upgrade
     when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
 
-- include: ../../../common/openshift-cluster/initialize_openshift_version.yml
+- include: ../initialize_openshift_version.yml
   vars:
     # Request specific openshift_release and let the openshift_version role handle converting this
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-- cgit v1.2.1

From cbe003803a4a7509ee7aa1fac776cc618818bb4e Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Mon, 12 Sep 2016 13:17:32 -0300
Subject: Attempt to tease apart pre upgrade for masters/nodes.
---
 .../openshift-cluster/upgrades/v3_3/upgrade.yml    |  53 +++-
 .../upgrades/v3_3/upgrade_masters.yml              |  49 ++++
 .../upgrades/v3_3/upgrade_nodes.yml                |  47 ++++
 .../common/openshift-cluster/upgrades/pre.yml      | 305 ---------------------
 .../openshift-cluster/upgrades/pre/backup_etcd.yml |  96 +++++++
 .../openshift-cluster/upgrades/pre/gate_checks.yml |   6 +
 .../upgrades/pre/verify_control_plane_running.yml  |  31 +++
 .../upgrades/pre/verify_docker_upgrade_targets.yml |  23 ++
 .../upgrades/pre/verify_inventory_vars.yml         |  37 +++
 .../upgrades/pre/verify_nodes_running.yml          |  13 +
 .../upgrades/pre/verify_upgrade_targets.yml        |  45 +++
 11 files changed, 398 insertions(+), 307 deletions(-)
 create mode 100644 playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml

(limited to 'playbooks')

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 3a1712000..5a95b5fdb 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -1,4 +1,7 @@
 ---
+#
+# Full Control Plane + Nodes Upgrade
+#
 - include: ../../../../common/openshift-cluster/upgrades/init.yml
 
 # Configure the upgrade target for the common upgrade tasks:
@@ -8,13 +11,59 @@
       openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
       openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
 
-- include: ../../../../common/openshift-cluster/upgrades/pre.yml
+# Pre-upgrade
+- include: ../initialize_facts.yml
+
+- name: Update repos and initialize facts on all hosts
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                   | union(groups['oo_masters_to_config'])
+                                                   | union(groups['oo_etcd_to_config'] | default([])))
+                                               | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                               }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+
+- include: ../initialize_openshift_version.yml
   vars:
-    openshift_deployment_type: "{{ deployment_type }}"
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+    # Docker role (a dependency) should be told not to do anything to installed version
+    # of docker, we handle this separately during upgrade. (the inventory may have a
+    # docker_version defined, we don't want to actually do it until later)
+    docker_protect_installed_version: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml
+
+  # vars:
+  #   openshift_deployment_type: "{{ deployment_type }}"
+
 - include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
   vars:
     openshift_deployment_type: "{{ deployment_type }}"
     master_config_hook: "v3_3/master_config_upgrade.yml"
     node_config_hook: "v3_3/node_config_upgrade.yml"
+
 - include: ../../../openshift-master/restart.yml
+
 - include: ../../../../common/openshift-cluster/upgrades/post.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
index c7d7eb3c6..5c2bba209 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
@@ -1,4 +1,9 @@
 ---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and etcd.
+#
 - include: ../../../../common/openshift-cluster/upgrades/init.yml
 
 # Configure the upgrade target for the common upgrade tasks:
@@ -7,3 +12,47 @@
   - set_fact:
       openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
       openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_facts.yml
+
+- name: Update repos and initialize facts on all hosts
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                   | union(groups['oo_masters_to_config'])
+                                                   | union(groups['oo_etcd_to_config'] | default([])))
+                                               | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                               }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+
+- include: ../initialize_openshift_version.yml
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+    # Docker role (a dependency) should be told not to do anything to installed version
+    # of docker, we handle this separately during upgrade. (the inventory may have a
+    # docker_version defined, we don't want to actually do it until later)
+    docker_protect_installed_version: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index c7d7eb3c6..d9c82d8dc 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -1,4 +1,9 @@
 ---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
 - include: ../../../../common/openshift-cluster/upgrades/init.yml
 
 # Configure the upgrade target for the common upgrade tasks:
@@ -7,3 +12,45 @@
   - set_fact:
       openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_facts.yml
+
+- name: Update repos and initialize facts on all hosts
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                   | union(groups['oo_masters_to_config'])
+                                                   | union(groups['oo_etcd_to_config'] | default([])))
+                                               | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                               }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+
+- include: ../initialize_openshift_version.yml
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+    # Docker role (a dependency) should be told not to do anything to installed version
+    # of docker, we handle this separately during upgrade. (the inventory may have a
+    # docker_version defined, we don't want to actually do it until later)
+    docker_protect_installed_version: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
index 2aada4370..a2d231c59 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre.yml
@@ -1,310 +1,5 @@
 ---
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-
-- include: ../initialize_facts.yml
-
-- name: Update repos and initialize facts on all hosts
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                   | union(groups['oo_masters_to_config'])
-                                                   | union(groups['oo_etcd_to_config'] | default([])))
-                                               | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                               }}"
-    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
-              openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- name: Evaluate additional groups for upgrade
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - name: Evaluate etcd_hosts_to_backup
-    add_host:
-      name: "{{ item }}"
-      groups: etcd_hosts_to_backup
-    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed on first master
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - fail:
-      msg: >
-        This upgrade is only supported for origin, openshift-enterprise, and online
-        deployment types
-    when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
-  # Error out in situations where the user has older versions specified in their
-  # inventory in any of the openshift_release, openshift_image_tag, and
-  # openshift_pkg_version variables. These must be removed or updated to proceed
-  # with upgrade.
-  # TODO: Should we block if you're *over* the next major release version as well?
-  - fail:
-      msg: >
-        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
-        valid version for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
-  - fail:
-      msg: >
-        openshift_image_tag is {{ openshift_image_tag }} which is not a
-        valid version for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
-  - set_fact:
-      openshift_release: "{{ openshift_release[1:] }}"
-    when: openshift_release is defined and openshift_release[0] == 'v'
-
-  - fail:
-      msg: >
-        openshift_release is {{ openshift_release }} which is not a
-        valid release for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
-
-- include: ../initialize_openshift_version.yml
-  vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-    # Docker role (a dependency) should be told not to do anything to installed version
-    # of docker, we handle this separately during upgrade. (the inventory may have a
-    # docker_version defined, we don't want to actually do it until later)
-    docker_protect_installed_version: True
-
-- name: Verify master processes
-  hosts: oo_masters_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
-  - name: Ensure Master is running
-    service:
-      name: "{{ openshift.common.service_type }}-master"
-      state: started
-      enabled: yes
-    when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
-
-  - name: Ensure HA Master is running
-    service:
-      name: "{{ openshift.common.service_type }}-master-api"
-      state: started
-      enabled: yes
-    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
-  - name: Ensure HA Master is running
-    service:
-      name: "{{ openshift.common.service_type }}-master-controllers"
-      state: started
-      enabled: yes
-    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
-- name: Verify node processes
-  hosts: oo_nodes_to_config
-  roles:
-  - openshift_facts
-  - openshift_docker_facts
-  tasks:
-  - name: Ensure Node is running
-    service:
-      name: "{{ openshift.common.service_type }}-node"
-      state: started
-      enabled: yes
-    when: openshift.common.is_containerized | bool
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  vars:
-    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
-  pre_tasks:
-  - fail:
-      msg: Verify OpenShift is already installed
-    when: openshift.common.version is not defined
-
-  - fail:
-      msg: Verify the correct version was found
-    when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
-
-  - name: Clean package cache
-    command: "{{ ansible_pkg_mgr }} clean all"
-    when: not openshift.common.is_atomic | bool
-
-  - set_fact:
-      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
-    when: not openshift.common.is_containerized | bool
-
-  - name: Verify containers are available for upgrade
-    command: >
-      docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
-    register: pull_result
-    changed_when: "'Downloaded newer image' in pull_result.stdout"
-    when: openshift.common.is_containerized | bool
-
-  - name: Check latest available OpenShift RPM version
-    command: >
-      {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
-    failed_when: false
-    changed_when: false
-    register: avail_openshift_version
-    when: not openshift.common.is_containerized | bool
-
-  - name: Verify OpenShift RPMs are available for upgrade
-    fail:
-      msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
-    when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
-
-  - fail:
-      msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
-    when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
-  tasks:
-  # Only check if docker upgrade is required if docker_upgrade is not
-  # already set to False.
-  - include: docker/upgrade_check.yml
-    when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
-
-  # Additional checks for Atomic hosts:
-
-  - name: Determine available Docker
-    shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
-    register: g_atomic_docker_version_result
-    when: openshift.common.is_atomic | bool
-
-  - set_fact:
-      l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
-    when: openshift.common.is_atomic | bool
-
-  - fail:
-      msg: This playbook requires access to Docker 1.10 or later
-    when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
-
-  - set_fact:
-      pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
-  hosts: localhost
-  connection: local
-  become: no
-  vars:
-    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
-  tasks:
-  - set_fact:
-      pre_upgrade_completed: "{{ hostvars
-                                 | oo_select_keys(pre_upgrade_hosts)
-                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
-  - set_fact:
-      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
-    when: pre_upgrade_failed | length > 0
-
 ###############################################################################
 # Backup etcd
 ###############################################################################
-- name: Backup etcd
-  hosts: etcd_hosts_to_backup
-  vars:
-    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
-    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  # Ensure we persist the etcd role for this host in openshift_facts
-  - openshift_facts:
-      role: etcd
-      local_facts: {}
-    when: "'etcd' not in openshift"
-
-  - stat: path=/var/lib/openshift
-    register: var_lib_openshift
-
-  - stat: path=/var/lib/origin
-    register: var_lib_origin
-
-  - name: Create origin symlink if necessary
-    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
-    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
-  # TODO: replace shell module with command and update later checks
-  # We assume to be using the data dir for all backups.
-  - name: Check available disk space for etcd backup
-    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
-    register: avail_disk
-
-  # TODO: replace shell module with command and update later checks
-  - name: Check current embedded etcd disk usage
-    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
-    register: etcd_disk_usage
-    when: embedded_etcd | bool
-
-  - name: Abort if insufficient disk space for etcd backup
-    fail:
-      msg: >
-        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
-        {{ avail_disk.stdout }} Kb available.
-    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
-  - name: Install etcd (for etcdctl)
-    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
-    when: not openshift.common.is_atomic | bool
-
-  - name: Generate etcd backup
-    command: >
-      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
-      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
-  - set_fact:
-      etcd_backup_complete: True
-
-  - name: Display location of etcd backup
-    debug:
-      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-##############################################################################
-# Gate on etcd backup
-##############################################################################
-- name: Gate on etcd backup
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      etcd_backup_completed: "{{ hostvars
-                                 | oo_select_keys(groups.etcd_hosts_to_backup)
-                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
-  - set_fact:
-      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
-    when: etcd_backup_failed | length > 0
-
-- name: Exit upgrade if dry-run specified
-  hosts: oo_first_master
-  tasks:
-  - fail:
-      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
-    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
new file mode 100644
index 000000000..2af73c467
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
@@ -0,0 +1,96 @@
+---
+- name: Evaluate additional groups for upgrade
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - name: Evaluate etcd_hosts_to_backup
+    add_host:
+      name: "{{ item }}"
+      groups: etcd_hosts_to_backup
+    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+- name: Backup etcd
+  hosts: etcd_hosts_to_backup
+  vars:
+    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+  roles:
+  - openshift_facts
+  tasks:
+  # Ensure we persist the etcd role for this host in openshift_facts
+  - openshift_facts:
+      role: etcd
+      local_facts: {}
+    when: "'etcd' not in openshift"
+
+  - stat: path=/var/lib/openshift
+    register: var_lib_openshift
+
+  - stat: path=/var/lib/origin
+    register: var_lib_origin
+
+  - name: Create origin symlink if necessary
+    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+  # TODO: replace shell module with command and update later checks
+  # We assume to be using the data dir for all backups.
+  - name: Check available disk space for etcd backup
+    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+    register: avail_disk
+
+  # TODO: replace shell module with command and update later checks
+  - name: Check current embedded etcd disk usage
+    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+    register: etcd_disk_usage
+    when: embedded_etcd | bool
+
+  - name: Abort if insufficient disk space for etcd backup
+    fail:
+      msg: >
+        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+        {{ avail_disk.stdout }} Kb available.
+    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+  - name: Install etcd (for etcdctl)
+    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+    when: not openshift.common.is_atomic | bool
+
+  - name: Generate etcd backup
+    command: >
+      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+  - set_fact:
+      etcd_backup_complete: True
+
+  - name: Display location of etcd backup
+    debug:
+      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      etcd_backup_completed: "{{ hostvars
+                                 | oo_select_keys(groups.etcd_hosts_to_backup)
+                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+  - set_fact:
+      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+    when: etcd_backup_failed | length > 0
+
+- name: Exit upgrade if dry-run specified
+  hosts: oo_first_master
+  tasks:
+  - fail:
+      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
+    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
new file mode 100644
index 000000000..907c1aa87
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
@@ -0,0 +1,6 @@
+---
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
new file mode 100644
index 000000000..06eb5f936
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -0,0 +1,31 @@
+---
+- name: Verify master processes
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+  - name: Ensure Master is running
+    service:
+      name: "{{ openshift.common.service_type }}-master"
+      state: started
+      enabled: yes
+    when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+  - name: Ensure HA Master is running
+    service:
+      name: "{{ openshift.common.service_type }}-master-api"
+      state: started
+      enabled: yes
+    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+  - name: Ensure HA Master is running
+    service:
+      name: "{{ openshift.common.service_type }}-master-controllers"
+      state: started
+      enabled: yes
+    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
new file mode 100644
index 000000000..635172de9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -0,0 +1,23 @@
+---
+- name: Verify docker upgrade targets
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  tasks:
+  # Only check if docker upgrade is required if docker_upgrade is not
+  # already set to False.
+  - include: docker/upgrade_check.yml
+    when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+  # Additional checks for Atomic hosts:
+
+  - name: Determine available Docker
+    shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+    register: g_atomic_docker_version_result
+    when: openshift.common.is_atomic | bool
+
+  - set_fact:
+      l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+    when: openshift.common.is_atomic | bool
+
+  - fail:
+      msg: This playbook requires access to Docker 1.10 or later
+    when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
new file mode 100644
index 000000000..9a959a959
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -0,0 +1,37 @@
+---
+- name: Verify upgrade can proceed on first master
+  hosts: oo_first_master
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: >
+        This upgrade is only supported for origin, openshift-enterprise, and online
+        deployment types
+    when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+  # Error out in situations where the user has older versions specified in their
+  # inventory in any of the openshift_release, openshift_image_tag, and
+  # openshift_pkg_version variables. These must be removed or updated to proceed
+  # with upgrade.
+  # TODO: Should we block if you're *over* the next major release version as well?
+  - fail:
+      msg: >
+        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+        valid version for a {{ openshift_upgrade_target }} upgrade
+    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+  - fail:
+      msg: >
+        openshift_image_tag is {{ openshift_image_tag }} which is not a
+        valid version for a {{ openshift_upgrade_target }} upgrade
+    when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+  - set_fact:
+      openshift_release: "{{ openshift_release[1:] }}"
+    when: openshift_release is defined and openshift_release[0] == 'v'
+
+  - fail:
+      msg: >
+        openshift_release is {{ openshift_release }} which is not a
+        valid release for a {{ openshift_upgrade_target }} upgrade
+    when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
new file mode 100644
index 000000000..354af3cde
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
@@ -0,0 +1,13 @@
+---
+- name: Verify node processes
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  - openshift_docker_facts
+  tasks:
+  - name: Ensure Node is running
+    service:
+      name: "{{ openshift.common.service_type }}-node"
+      state: started
+      enabled: yes
+    when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
new file mode 100644
index 000000000..bb713349e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -0,0 +1,45 @@
+---
+- name: Verify upgrade targets
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  vars:
+    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+  pre_tasks:
+  - fail:
+      msg: Verify OpenShift is already installed
+    when: openshift.common.version is not defined
+
+  - fail:
+      msg: Verify the correct version was found
+    when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+
+  - name: Clean package cache
+    command: "{{ ansible_pkg_mgr }} clean all"
+    when: not openshift.common.is_atomic | bool
+
+  - set_fact:
+      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+    when: not openshift.common.is_containerized | bool
+
+  - name: Verify containers are available for upgrade
+    command: >
+      docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+    register: pull_result
+    changed_when: "'Downloaded newer image' in pull_result.stdout"
+    when: openshift.common.is_containerized | bool
+
+  - name: Check latest available OpenShift RPM version
+    command: >
+      {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+    failed_when: false
+    changed_when: false
+    register: avail_openshift_version
+    when: not openshift.common.is_containerized | bool
+
+  - name: Verify OpenShift RPMs are available for upgrade
+    fail:
+      msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+    when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+
+  - fail:
+      msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+    when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
-- cgit v1.2.1

From 6f056fd9673428c00b5e496a9a084cf09ad777cf Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Mon, 12 Sep 2016 15:16:38 -0300
Subject: Verify masters are upgraded before proceeding with node only upgrade.
---
 playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml | 2 +-
 playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml   | 8 +++++++-
 playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml   | 3 ---
 3 files changed, 8 insertions(+), 5 deletions(-)

(limited to 'playbooks')

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
index 5c2bba209..94339dd63 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
@@ -17,7 +17,7 @@
 - include: ../initialize_facts.yml
 
 - name: Update repos and initialize facts on all hosts
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
   roles:
   - openshift_repos
 
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index d9c82d8dc..9d29ba1ab 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -17,7 +17,7 @@
 - include: ../initialize_facts.yml
 
 - name: Update repos and initialize facts on all hosts
-  hosts: oo_nodes_to_config
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
   roles:
   - openshift_repos
 
@@ -54,3 +54,9 @@
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+
+- name: Verify masters are already upgraded
+  hosts: oo_masters_to_config
+  tasks:
+  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+    when: openshift.common.version != openshift_version
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
index 2af73c467..994ac2bb9 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
@@ -70,9 +70,6 @@
       msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
 
 
-##############################################################################
-# Gate on etcd backup
-##############################################################################
 - name: Gate on etcd backup
   hosts: localhost
   connection: local
-- cgit v1.2.1

From 9dcc8fc7123e1f13e945a658ffe7331730b0105f Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Mon, 12 Sep 2016 15:50:32 -0300
Subject: Split upgrade for control plane/nodes.
---
 .../byo/openshift-cluster/upgrades/v3_3/roles      |   1 +
 .../openshift-cluster/upgrades/v3_3/upgrade.yml    |  30 ++-
 .../upgrades/v3_3/upgrade_control_plane.yml        |  86 +++++++
 .../upgrades/v3_3/upgrade_masters.yml              |  58 -----
 .../upgrades/v3_3/upgrade_nodes.yml                |  35 ++-
 .../common/openshift-cluster/initialize_facts.yml  |   2 +
 .../upgrades/cleanup_unused_images.yml             |  22 ++
 .../common/openshift-cluster/upgrades/init.yml     |   2 +-
 .../common/openshift-cluster/upgrades/post.yml     |  72 ------
 .../upgrades/post_control_plane.yml                |  72 ++++++
 .../common/openshift-cluster/upgrades/pre.yml      |   5 -
 .../openshift-cluster/upgrades/pre/backup_etcd.yml |   6 -
 .../common/openshift-cluster/upgrades/pre/roles    |   1 +
 .../upgrades/pre/verify_docker_upgrade_targets.yml |   2 +-
 .../common/openshift-cluster/upgrades/upgrade.yml  | 262 ---------------------
 .../upgrades/upgrade_control_plane.yml             | 174 ++++++++++++++
 .../openshift-cluster/upgrades/upgrade_nodes.yml   |  60 +++++
 17 files changed, 469 insertions(+), 421 deletions(-)
 create mode 120000 playbooks/byo/openshift-cluster/upgrades/v3_3/roles
 create mode 100644 playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
 delete mode 100644 playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/post.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/pre.yml
 create mode 120000 playbooks/common/openshift-cluster/upgrades/pre/roles
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/upgrade.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

(limited to 'playbooks')

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 5a95b5fdb..87a8ef66c 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -12,7 +12,7 @@
   openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
 
 # Pre-upgrade
-- include: ../initialize_facts.yml
+- include: ../../../../common/openshift-cluster/initialize_facts.yml
 
 - name: Update repos and initialize facts on all hosts
   hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -33,7 +33,7 @@
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
 
-- include: ../initialize_openshift_version.yml
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
   vars:
     # Request specific openshift_release and let the openshift_version role handle converting this
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
@@ -45,7 +45,7 @@
     # docker_version defined, we don't want to actually do it until later)
     docker_protect_installed_version: True
 
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
 
@@ -55,15 +55,29 @@
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml
 
-  # vars:
-  #   openshift_deployment_type: "{{ deployment_type }}"
+- name: Exit upgrade if dry-run specified
+  hosts: oo_all_hosts
+  tasks:
+  - fail:
+      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
+    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  tasks:
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
 
-- include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
   vars:
-    openshift_deployment_type: "{{ deployment_type }}"
     master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+  vars:
     node_config_hook: "v3_3/node_config_upgrade.yml"
 
 - include: ../../../openshift-master/restart.yml
 
-- include: ../../../../common/openshift-cluster/upgrades/post.yml
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..bcc304141
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,86 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/initialize_facts.yml
+
+- name: Update repos on control plane hosts
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                   | union(groups['oo_masters_to_config'])
+                                                   | union(groups['oo_etcd_to_config'] | default([])))
+                                               | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                               }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+    # Docker role (a dependency) should be told not to do anything to installed version
+    # of docker, we handle this separately during upgrade. (the inventory may have a
+    # docker_version defined, we don't want to actually do it until later)
+    docker_protect_installed_version: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml
+
+- name: Exit upgrade if dry-run specified
+  hosts: oo_all_hosts
+  tasks:
+  - fail:
+      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
+    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml deleted file mode 100644 index 94339dd63..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and etcd. -# -- include: ../../../../common/openshift-cluster/upgrades/init.yml - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_facts.yml - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_config - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - -- include: ../initialize_openshift_version.yml - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - # Docker role (a dependency) should be told not to do anything to installed version - # of docker, we handle this separately during upgrade. 
(the inventory may have a - # docker_version defined, we don't want to actually do it until later) - docker_protect_installed_version: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - -- include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index 9d29ba1ab..e79df1a02 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -14,9 +14,9 @@ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" # Pre-upgrade -- include: ../initialize_facts.yml +- include: ../../../../common/openshift-cluster/initialize_facts.yml -- name: Update repos and initialize facts on all hosts +- name: Update repos on nodes hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config roles: - openshift_repos @@ -35,7 +35,7 @@ - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml -- include: ../initialize_openshift_version.yml +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml vars: # Request specific openshift_release and let the openshift_version role handle converting this # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if @@ -47,7 +47,20 @@ # docker_version defined, we don't want to actually do it until later) docker_protect_installed_version: True -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- name: Exit upgrade if dry-run specified + hosts: oo_all_hosts + tasks: + - fail: + msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable." + when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -55,8 +68,14 @@ - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml -- name: Verify masters are already upgraded - hosts: oo_masters_to_config +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_config tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
- when: openshift.common.version != openshift_version + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index 04dde632b..6d83d2527 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -11,3 +11,5 @@ hostname: "{{ openshift_hostname | default(None) }}" - set_fact: openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + - set_fact: + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml new file mode 100644 index 000000000..6e953be69 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml @@ -0,0 +1,22 @@ +--- +- name: Check Docker image count + shell: "docker images -aq | wc -l" + register: docker_image_count + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- debug: var=docker_image_count.stdout + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- name: Remove unused Docker images for Docker 1.10+ migration + shell: "docker rmi `docker images -aq`" + # Will fail on images still in use: + failed_when: false + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- name: Check Docker image count + shell: "docker images -aq | wc -l" + register: docker_image_count + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- debug: var=docker_image_count.stdout + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index f3bc70a72..03c4a3112 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -6,7 +6,7 @@ become: no gather_facts: no tasks: - - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml + - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml - add_host: name: "{{ item }}" groups: l_oo_all_hosts diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post.yml deleted file mode 100644 index e43954453..000000000 --- a/playbooks/common/openshift-cluster/upgrades/post.yml +++ /dev/null @@ -1,72 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade default router, default registry and examples -############################################################################### -- name: Upgrade default router and default registry - hosts: oo_first_master - vars: - openshift_deployment_type: "{{ deployment_type }}" - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}" - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}" - oc_cmd: "{{ openshift.common.client_binary }} --config={{ 
openshift.common.config_base }}/master/admin.kubeconfig" - roles: - - openshift_manageiq - # Create the new templates shipped in 3.2, existing templates are left - # unmodified. This prevents the subsequent role definition for - # openshift_examples from failing when trying to replace templates that do - # not already exist. We could have potentially done a replace --force to - # create and update in one step. - - openshift_examples - # Update the existing templates - - role: openshift_examples - registry_url: "{{ openshift.master.registry_url }}" - openshift_examples_import_command: replace - pre_tasks: - - name: Collect all routers - command: > - {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json - register: all_routers - failed_when: false - changed_when: false - - - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}" - when: all_routers.rc == 0 - - - set_fact: haproxy_routers=[] - when: all_routers.rc != 0 - - - name: Update router image to current version - when: all_routers.rc == 0 - command: > - {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p - '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' - --api-version=v1 - with_items: haproxy_routers - - - name: Check for default registry - command: > - {{ oc_cmd }} get -n default dc/docker-registry - register: _default_registry - failed_when: false - changed_when: false - - - name: Update registry image to current version - when: _default_registry.rc == 0 - command: > - {{ oc_cmd }} patch dc/docker-registry -n default -p - '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' - --api-version=v1 - -# Check for warnings to be printed at the end of the upgrade: -- name: Check for warnings - hosts: oo_masters_to_config - tasks: - # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond: - - command: > - grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml - register: grep_plugin_order_override - when: openshift.common.version_gte_3_3_or_1_3 | bool - failed_when: false - - name: Warn if pluginOrderOverride is in use in master-config.yaml - debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information." 
- when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0 diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml new file mode 100644 index 000000000..e43954453 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -0,0 +1,72 @@ +--- +############################################################################### +# Post upgrade - Upgrade default router, default registry and examples +############################################################################### +- name: Upgrade default router and default registry + hosts: oo_first_master + vars: + openshift_deployment_type: "{{ deployment_type }}" + registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}" + oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + roles: + - openshift_manageiq + # Create the new templates shipped in 3.2, existing templates are left + # unmodified. This prevents the subsequent role definition for + # openshift_examples from failing when trying to replace templates that do + # not already exist. We could have potentially done a replace --force to + # create and update in one step. + - openshift_examples + # Update the existing templates + - role: openshift_examples + registry_url: "{{ openshift.master.registry_url }}" + openshift_examples_import_command: replace + pre_tasks: + - name: Collect all routers + command: > + {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json + register: all_routers + failed_when: false + changed_when: false + + - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}" + when: all_routers.rc == 0 + + - set_fact: haproxy_routers=[] + when: all_routers.rc != 0 + + - name: Update router image to current version + when: all_routers.rc == 0 + command: > + {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' + --api-version=v1 + with_items: haproxy_routers + + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -n default -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + --api-version=v1 + +# Check for warnings to be printed at the end of the upgrade: +- name: Check for warnings + hosts: oo_masters_to_config + tasks: + # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond: + - command: > + grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml + register: grep_plugin_order_override + when: 
openshift.common.version_gte_3_3_or_1_3 | bool + failed_when: false + - name: Warn if pluginOrderOverride is in use in master-config.yaml + debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information." + when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0 diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml deleted file mode 100644 index a2d231c59..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -############################################################################### -# Backup etcd -############################################################################### - diff --git a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml index 994ac2bb9..3164b43ee 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml @@ -85,9 +85,3 @@ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" when: etcd_backup_failed | length > 0 -- name: Exit upgrade if dry-run specified - hosts: oo_first_master - tasks: - - fail: - msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable." - when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/roles b/playbooks/common/openshift-cluster/upgrades/pre/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/roles @@ -0,0 +1 @@ +../../../../../roles/ \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml index 635172de9..d8b282b41 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml @@ -4,7 +4,7 @@ tasks: # Only check if docker upgrade is required if docker_upgrade is not # already set to False. - - include: docker/upgrade_check.yml + - include: ../docker/upgrade_check.yml when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool # Additional checks for Atomic hosts: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml deleted file mode 100644 index c4ce5fef6..000000000 --- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml +++ /dev/null @@ -1,262 +0,0 @@ ---- -############################################################################### -# The restart playbook should be run after this playbook completes. -############################################################################### - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config - tasks: - - name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - - - debug: var=docker_image_count.stdout - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - - - name: Remove unused Docker images for Docker 1.10+ migration - shell: "docker rmi `docker images -aq`" - # Will fail on images still in use: - failed_when: false - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - - - name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - - - debug: var=docker_image_count.stdout - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -############################################################################### -# Upgrade Masters -############################################################################### -- name: Upgrade master packages - hosts: oo_masters_to_config - handlers: - - include: ../../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - tasks: - - include: rpm_upgrade.yml component=master - when: not openshift.common.is_containerized | bool - -- name: Determine if service signer cert must be created - hosts: oo_first_master - tasks: - - name: Determine if service signer certificate must be created - stat: - path: "{{ openshift.common.config_base }}/master/service-signer.crt" - register: service_signer_cert_stat - changed_when: false - -# Create service signer cert when missing. Service signer certificate -# is added to master config in the master config hook for v3_3. 
-- include: create_service_signer_cert.yml - when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool) - -- name: Upgrade master config and systemd units - hosts: oo_masters_to_config - handlers: - - include: ../../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - tasks: - - include: "{{ master_config_hook }}" - when: master_config_hook is defined - - - include_vars: ../../../../roles/openshift_master/vars/main.yml - - - name: Update systemd units - include: ../../../../roles/openshift_master/tasks/systemd_units.yml - -# - name: Upgrade master configuration -# openshift_upgrade_config: -# from_version: '3.1' -# to_version: '3.2' -# role: master -# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" - - - name: Check for ca-bundle.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - register: ca_bundle_stat - failed_when: false - - - name: Check for ca.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca.crt" - register: ca_crt_stat - failed_when: false - - - name: Migrate ca.crt to ca-bundle.crt - command: mv ca.crt ca-bundle.crt - args: - chdir: "{{ openshift.common.config_base }}/master" - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Link ca.crt to ca-bundle.crt - file: - src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - path: "{{ openshift.common.config_base }}/master/ca.crt" - state: link - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - -- name: Set master update status to complete - hosts: oo_masters_to_config - tasks: - - set_fact: - master_update_complete: True - -############################################################################## -# Gate on master update complete -############################################################################## -- name: Gate on master update - hosts: localhost - connection: local - become: no - tasks: - - set_fact: - master_update_completed: "{{ hostvars - | oo_select_keys(groups.oo_masters_to_config) - | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" - - set_fact: - master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" - - fail: - msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" - when: master_update_failed | length > 0 - -############################################################################### -# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints -############################################################################### - -- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints - hosts: oo_masters_to_config - roles: - - { role: openshift_cli } - vars: - origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}" - ent_reconcile_bindings: true - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role, - # it will be updated when we perform node upgrade. 
- docker_protect_installed_version: True - tasks: - - name: Verifying the correct commandline tools are available - shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} - when: openshift.common.is_containerized | bool and verify_upgrade_version is defined - - - name: Reconcile Cluster Roles - command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-roles --additive-only=true --confirm - run_once: true - - - name: Reconcile Cluster Role Bindings - command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-role-bindings - --exclude-groups=system:authenticated - --exclude-groups=system:authenticated:oauth - --exclude-groups=system:unauthenticated - --exclude-users=system:anonymous - --additive-only=true --confirm - when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool - run_once: true - - - name: Reconcile Security Context Constraints - command: > - {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true - run_once: true - - - set_fact: - reconcile_complete: True - -############################################################################## -# Gate on reconcile -############################################################################## -- name: Gate on reconcile - hosts: localhost - connection: local - become: no - tasks: - - set_fact: - reconcile_completed: "{{ hostvars - | oo_select_keys(groups.oo_masters_to_config) - | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" - - set_fact: - reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" - - fail: - msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" - when: reconcile_failed | length > 0 - -############################################################################### -# Upgrade Nodes -############################################################################### - -# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service) -- name: Perform upgrades that may require node evacuation - hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config - serial: 1 - any_errors_fatal: true - roles: - - openshift_facts - handlers: - - include: ../../../../roles/openshift_node/handlers/main.yml - static: yes - tasks: - # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node - # or docker actually needs an upgrade before proceeding. Perhaps best to save this until - # we merge upgrade functionality into the base roles and a normal config.yml playbook run. 
- - name: Determine if node is currently scheduleable - command: > - {{ openshift.common.client_binary }} get node {{ openshift.node.nodename }} -o json - register: node_output - delegate_to: "{{ groups.oo_first_master.0 }}" - changed_when: false - when: inventory_hostname in groups.oo_nodes_to_config - - - set_fact: - was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}" - when: inventory_hostname in groups.oo_nodes_to_config - - - name: Mark unschedulable if host is a node - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config - - - name: Evacuate Node for Kubelet upgrade - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config - - - include: docker/upgrade.yml - when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool - - include: "{{ node_config_hook }}" - when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config - - - include: rpm_upgrade.yml - vars: - component: "node" - openshift_version: "{{ openshift_pkg_version | default('') }}" - when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool - - - include: containerized_node_upgrade.yml - when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool - - - meta: flush_handlers - - - name: Set node schedulability - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool - - diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml new file mode 100644 index 000000000..5d74e0d10 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -0,0 +1,174 @@ +--- +############################################################################### +# Upgrade Masters +############################################################################### +- name: Upgrade master packages + hosts: oo_masters_to_config + handlers: + - include: ../../../../roles/openshift_master/handlers/main.yml + static: yes + roles: + - openshift_facts + tasks: + - include: rpm_upgrade.yml component=master + when: not openshift.common.is_containerized | bool + +- name: Determine if service signer cert must be created + hosts: oo_first_master + tasks: + - name: Determine if service signer certificate must be created + stat: + path: "{{ openshift.common.config_base }}/master/service-signer.crt" + register: service_signer_cert_stat + changed_when: false + +# Create service signer cert when missing. Service signer certificate +# is added to master config in the master config hook for v3_3. 
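+# The stat task above registers service_signer_cert_stat on the first
+# master; the include below only runs when that file does not exist.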
+- include: create_service_signer_cert.yml + when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool) + +- name: Upgrade master config and systemd units + hosts: oo_masters_to_config + handlers: + - include: ../../../../roles/openshift_master/handlers/main.yml + static: yes + roles: + - openshift_facts + tasks: + - include: "{{ master_config_hook }}" + when: master_config_hook is defined + + - include_vars: ../../../../roles/openshift_master/vars/main.yml + + - name: Update systemd units + include: ../../../../roles/openshift_master/tasks/systemd_units.yml + +# - name: Upgrade master configuration +# openshift_upgrade_config: +# from_version: '3.1' +# to_version: '3.2' +# role: master +# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" + + - name: Check for ca-bundle.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + register: ca_bundle_stat + failed_when: false + + - name: Check for ca.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca.crt" + register: ca_crt_stat + failed_when: false + + - name: Migrate ca.crt to ca-bundle.crt + command: mv ca.crt ca-bundle.crt + args: + chdir: "{{ openshift.common.config_base }}/master" + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + + - name: Link ca.crt to ca-bundle.crt + file: + src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + path: "{{ openshift.common.config_base }}/master/ca.crt" + state: link + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + +- name: Set master update status to complete + hosts: oo_masters_to_config + tasks: + - set_fact: + master_update_complete: True + +############################################################################## +# Gate on master update complete +############################################################################## +- name: Gate on master update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + master_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" + - set_fact: + master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" + when: master_update_failed | length > 0 + +############################################################################### +# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints +############################################################################### + +- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints + hosts: oo_masters_to_config + roles: + - { role: openshift_cli } + vars: + origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}" + ent_reconcile_bindings: true + openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role, + # it will be updated when we perform node upgrade. 
+ docker_protect_installed_version: True + tasks: + - name: Verifying the correct commandline tools are available + shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} + when: openshift.common.is_containerized | bool and verify_upgrade_version is defined + + - name: Reconcile Cluster Roles + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --additive-only=true --confirm + run_once: true + + - name: Reconcile Cluster Role Bindings + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:authenticated:oauth + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + run_once: true + + - name: Reconcile Security Context Constraints + command: > + {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true + run_once: true + + - set_fact: + reconcile_complete: True + +############################################################################## +# Gate on reconcile +############################################################################## +- name: Gate on reconcile + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + reconcile_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" + - set_fact: + reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" + when: reconcile_failed | length > 0 + +- name: Upgrade Docker on dedicated containerized etcd hosts + hosts: oo_etcd_to_config:!oo_nodes_to_config + serial: 1 + any_errors_fatal: true + roles: + - openshift_facts + tasks: + - include: docker/upgrade.yml + when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml new file mode 100644 index 000000000..0ab8ba23c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -0,0 +1,60 @@ +--- +- name: Evacuate and upgrade nodes + hosts: oo_nodes_to_config + serial: 1 + any_errors_fatal: true + roles: + - openshift_facts + handlers: + - include: ../../../../roles/openshift_node/handlers/main.yml + static: yes + tasks: + # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node + # or docker actually needs an upgrade before proceeding. Perhaps best to save this until + # we merge upgrade functionality into the base roles and a normal config.yml playbook run. 
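+  # The flow below: record the node's current schedulability, mark it
+  # unschedulable, evacuate it, upgrade docker/node components, then
+  # restore schedulability if the node was schedulable before.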
+ - name: Determine if node is currently scheduleable + command: > + {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json + register: node_output + delegate_to: "{{ groups.oo_first_master.0 }}" + changed_when: false + when: inventory_hostname in groups.oo_nodes_to_config + + - set_fact: + was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}" + when: inventory_hostname in groups.oo_nodes_to_config + + - name: Mark unschedulable if host is a node + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false + delegate_to: "{{ groups.oo_first_master.0 }}" + when: inventory_hostname in groups.oo_nodes_to_config + + - name: Evacuate Node for Kubelet upgrade + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force + delegate_to: "{{ groups.oo_first_master.0 }}" + when: inventory_hostname in groups.oo_nodes_to_config + + - include: docker/upgrade.yml + when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool + + - include: "{{ node_config_hook }}" + when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config + + - include: rpm_upgrade.yml + vars: + component: "node" + openshift_version: "{{ openshift_pkg_version | default('') }}" + when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool + + - include: containerized_node_upgrade.yml + when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool + + - meta: flush_handlers + + - name: Set node schedulability + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true + delegate_to: "{{ groups.oo_first_master.0 }}" + when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool -- cgit v1.2.1 From 67fbf22ec96d07b8646de701b60e2718a4d6bef3 Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Wed, 14 Sep 2016 14:51:47 -0300 Subject: Allow customizing node upgrade serial value. --- playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'playbooks') diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 0ab8ba23c..5190ab4d8 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -1,7 +1,9 @@ --- - name: Evacuate and upgrade nodes hosts: oo_nodes_to_config - serial: 1 + # This var must be set with -e on invocation, as it is not a per-host inventory var + # and is evaluated early. Values such as "20%" can also be used. + serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" any_errors_fatal: true roles: - openshift_facts -- cgit v1.2.1 From 0e7e7f6b45ace8a3a9516c8800e5cf84d7aa14fa Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Mon, 19 Sep 2016 14:57:13 -0300 Subject: Allow filtering nodes to upgrade by label. 
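When openshift_upgrade_nodes_label is set, only nodes whose labels match
that selector are added to the new oo_nodes_to_upgrade group; otherwise the
group falls back to all of oo_nodes_to_config. A hypothetical invocation
(inventory path and label value are placeholders) might look like:

  ansible-playbook -i <inventory> \
    playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml \
    -e openshift_upgrade_nodes_label="region=infra"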
--- .../upgrades/docker/docker_upgrade.yml | 10 +++--- .../openshift-cluster/upgrades/v3_3/upgrade.yml | 9 ++--- .../upgrades/v3_3/upgrade_control_plane.yml | 3 +- .../upgrades/v3_3/upgrade_nodes.yml | 10 +++--- .../common/openshift-cluster/upgrades/init.yml | 2 ++ .../upgrades/initialize_nodes_to_upgrade.yml | 40 ++++++++++++++++++++++ .../openshift-cluster/upgrades/pre/gate_checks.yml | 2 +- .../upgrades/pre/verify_docker_upgrade_targets.yml | 2 +- .../upgrades/pre/verify_upgrade_targets.yml | 2 +- .../upgrades/upgrade_control_plane.yml | 2 +- .../openshift-cluster/upgrades/upgrade_nodes.yml | 18 +++++----- 11 files changed, 71 insertions(+), 29 deletions(-) create mode 100644 playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml (limited to 'playbooks') diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml index 1fa32570c..9be6becc1 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -1,6 +1,6 @@ - name: Check for appropriate Docker versions - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config roles: - openshift_facts tasks: @@ -19,7 +19,7 @@ # don't want to carry on, potentially taking out every node. The playbook can safely be re-run # and will not take any action on a node already running the requested docker version. - name: Evacuate and upgrade nodes - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config serial: 1 any_errors_fatal: true tasks: @@ -27,13 +27,13 @@ command: > {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" - when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config + when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade - name: Evacuate Node for Kubelet upgrade command: > {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" - when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config + when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool @@ -43,5 +43,5 @@ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: openshift.node.schedulable | bool - when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool + when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index 87a8ef66c..f704dfa7c 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -12,15 +12,16 @@ 
openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" # Pre-upgrade -- include: ../../../../common/openshift-cluster/initialize_facts.yml + +- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config roles: - openshift_repos - name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade tasks: - set_fact: openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] @@ -66,7 +67,7 @@ # before we get into the serialized upgrade process which will then remove # remaining images if possible. - name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index bcc304141..7030c2ce0 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -21,7 +21,6 @@ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" # Pre-upgrade -- include: ../../../../common/openshift-cluster/initialize_facts.yml - name: Update repos on control plane hosts hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config @@ -29,7 +28,7 @@ - openshift_repos - name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade tasks: - set_fact: openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index e79df1a02..dcce0ff04 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -14,18 +14,18 @@ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" # Pre-upgrade -- include: ../../../../common/openshift-cluster/initialize_facts.yml +- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config roles: - openshift_repos - name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade tasks: - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') @@ -72,7 +72,7 @@ # before we get into the serialized upgrade process which will then remove # remaining images if 
possible. - name: Cleanup unused Docker images - hosts: oo_nodes_to_config + hosts: oo_nodes_to_upgrade tasks: - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 03c4a3112..f3b3abe0d 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -46,3 +46,5 @@ - set_fact: openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" when: openshift_docker_log_options is not defined + +- include: ../initialize_facts.yml diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml new file mode 100644 index 000000000..4e375ac26 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -0,0 +1,40 @@ +--- +- name: Filter list of nodes to be upgraded if necessary + hosts: oo_first_master + tasks: + - name: Retrieve list of openshift nodes matching upgrade label + command: > + {{ openshift.common.client_binary }} + get nodes + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + --selector={{ openshift_upgrade_nodes_label }} + -o jsonpath='{.items[*].metadata.name}' + register: matching_nodes + changed_when: false + when: openshift_upgrade_nodes_label is defined + + - set_fact: + nodes_to_upgrade: "{{ matching_nodes.stdout.split(' ') }}" + when: openshift_upgrade_nodes_label is defined + + # We got a list of nodes with the label, now we need to match these with inventory hosts + # using their openshift.common.hostname fact. + - name: Map labelled nodes to inventory hosts + add_host: + name: "{{ item }}" + groups: temp_nodes_to_upgrade + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: " {{ groups['oo_nodes_to_config'] }}" + when: openshift_upgrade_nodes_label is defined and hostvars[item].openshift.common.hostname in nodes_to_upgrade + changed_when: false + + # Build up the oo_nodes_to_upgrade group, use the list filtered by label if + # present, otherwise hit all nodes: + - name: Evaluate oo_nodes_to_upgrade + add_host: + name: "{{ item }}" + groups: oo_nodes_to_upgrade + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups['temp_nodes_to_upgrade'] | default(groups['oo_nodes_to_config']) }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml index 907c1aa87..8ecae4539 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml @@ -1,6 +1,6 @@ --- - name: Flag pre-upgrade checks complete for hosts without errors - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - set_fact: pre_upgrade_complete: True diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml index d8b282b41..ba4d77617 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml @@ -1,6 +1,6 
@@ --- - name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: # Only check if docker upgrade is required if docker_upgrade is not # already set to False. diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index bb713349e..9632626a4 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -1,6 +1,6 @@ --- - name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade vars: openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" pre_tasks: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 5d74e0d10..75ae92716 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -164,7 +164,7 @@ when: reconcile_failed | length > 0 - name: Upgrade Docker on dedicated containerized etcd hosts - hosts: oo_etcd_to_config:!oo_nodes_to_config + hosts: oo_etcd_to_config:!oo_nodes_to_upgrade serial: 1 any_errors_fatal: true roles: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 5190ab4d8..a54349220 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -1,6 +1,6 @@ --- - name: Evacuate and upgrade nodes - hosts: oo_nodes_to_config + hosts: oo_nodes_to_upgrade # This var must be set with -e on invocation, as it is not a per-host inventory var # and is evaluated early. Values such as "20%" can also be used. 
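+  # For example (illustrative value only):
+  #   ansible-playbook ... -e openshift_upgrade_nodes_serial="20%"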
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" @@ -20,38 +20,38 @@ register: node_output delegate_to: "{{ groups.oo_first_master.0 }}" changed_when: false - when: inventory_hostname in groups.oo_nodes_to_config + when: inventory_hostname in groups.oo_nodes_to_upgrade - set_fact: was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}" - when: inventory_hostname in groups.oo_nodes_to_config + when: inventory_hostname in groups.oo_nodes_to_upgrade - name: Mark unschedulable if host is a node command: > {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config + when: inventory_hostname in groups.oo_nodes_to_upgrade - name: Evacuate Node for Kubelet upgrade command: > {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config + when: inventory_hostname in groups.oo_nodes_to_upgrade - include: docker/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool - include: "{{ node_config_hook }}" - when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config + when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_upgrade - include: rpm_upgrade.yml vars: component: "node" openshift_version: "{{ openshift_pkg_version | default('') }}" - when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool + when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool - include: containerized_node_upgrade.yml - when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool + when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool - meta: flush_handlers @@ -59,4 +59,4 @@ command: > {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool + when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool -- cgit v1.2.1 From 910e23336da02b8d1eec75276ce77ec269e2216c Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Thu, 22 Sep 2016 11:48:46 -0300 Subject: Skip the docker role in early upgrade stages. This improves the situation further and prevents configuration changes from accidentally triggering docker restarts, before we've evacuated nodes. Now in two places, we skip the role entirely, instead of previous implementation which only skipped upgrading the installed version. 
(which did not catch config issues) --- playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 10 ++++++---- .../openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 10 ++++++---- .../byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 10 ++++++---- .../openshift-cluster/upgrades/upgrade_control_plane.yml | 6 +++--- playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 5 +++-- 5 files changed, 24 insertions(+), 17 deletions(-) (limited to 'playbooks') diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index f704dfa7c..cd3ca0817 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -41,10 +41,12 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # Docker role (a dependency) should be told not to do anything to installed version - # of docker, we handle this separately during upgrade. (the inventory may have a - # docker_version defined, we don't want to actually do it until later) - docker_protect_installed_version: True + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True - include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index 7030c2ce0..bf18a2a66 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -48,10 +48,12 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # Docker role (a dependency) should be told not to do anything to installed version - # of docker, we handle this separately during upgrade. (the inventory may have a - # docker_version defined, we don't want to actually do it until later) - docker_protect_installed_version: True + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True - include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index dcce0ff04..201bb5d16 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -42,10 +42,12 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # Docker role (a dependency) should be told not to do anything to installed version - # of docker, we handle this separately during upgrade. 
-    # docker_version defined, we don't want to actually do it until later)
-    docker_protect_installed_version: True
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True

 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 75ae92716..22d56f6d3 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -111,9 +111,9 @@
     origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
     ent_reconcile_bindings: true
     openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
-    # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role,
-    # it will be updated when we perform node upgrade.
-    docker_protect_installed_version: True
+    # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
+    # restart.
+    skip_docker_role: True
   tasks:
   - name: Verifying the correct commandline tools are available
     shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index a54349220..917c95e29 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,10 +7,11 @@
   any_errors_fatal: true
   roles:
   - openshift_facts
+  - docker
   handlers:
   - include: ../../../../roles/openshift_node/handlers/main.yml
     static: yes
-  tasks:
+  pre_tasks:
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -37,7 +38,7 @@
       {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
-
+  tasks:
   - include: docker/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
--
cgit v1.2.1


From ad47ff1df5147d35572ee3ae08f0fd74fade5e33 Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Thu, 22 Sep 2016 14:31:00 -0300
Subject: Allow a couple retries when unscheduling/rescheduling nodes in upgrade.

This can fail with a transient "object has been modified" error asking
you to re-try your changes on the latest version of the object.

Allow up to three retries to see if we can get the change to take effect.
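The generic shape of this retry loop in Ansible, as a minimal standalone
sketch (the command and the cmd_result name are placeholders, not taken
from this patch):

  - name: Retry a flaky command until it succeeds
    command: /bin/true
    register: cmd_result
    # Re-run the task until the return code is 0, up to 3 attempts,
    # pausing 1 second between attempts.
    until: cmd_result.rc == 0
    retries: 3
    delay: 1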
---
 .../common/openshift-cluster/upgrades/upgrade_nodes.yml | 12 ++++++++++++
 1 file changed, 12 insertions(+)
(limited to 'playbooks')

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 917c95e29..9b572dcdf 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -32,6 +32,12 @@
       {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
+    # NOTE: There is a transient "object has been modified" error here, allow a couple
+    # retries for a more reliable upgrade.
+    register: node_unsched
+    until: node_unsched.rc == 0
+    retries: 3
+    delay: 1

   - name: Evacuate Node for Kubelet upgrade
     command: >
@@ -61,3 +67,9 @@
       {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
+    register: node_sched
+    until: node_sched.rc == 0
+    retries: 3
+    delay: 1
+
+
--
cgit v1.2.1


From 0ad53cbbe8d6f26e98796734896be6e827051ce6 Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Wed, 28 Sep 2016 08:51:05 -0300
Subject: Move etcd backup from pre-upgrade to upgrade itself.

---
 .../openshift-cluster/upgrades/v3_3/upgrade.yml    |  2 -
 .../upgrades/v3_3/upgrade_control_plane.yml        |  2 -
 .../openshift-cluster/upgrades/pre/backup_etcd.yml | 87 ----------------------
 .../upgrades/upgrade_control_plane.yml             | 86 +++++++++++++++++++++
 4 files changed, 86 insertions(+), 91 deletions(-)
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
(limited to 'playbooks')

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index cd3ca0817..6bd1aea07 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -56,8 +56,6 @@

 - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml

-- include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml
-
 - name: Exit upgrade if dry-run specified
   hosts: oo_all_hosts
   tasks:

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index bf18a2a66..35951ccda 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -63,8 +63,6 @@

 - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml

-- include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml
-
 - name: Exit upgrade if dry-run specified
   hosts: oo_all_hosts
   tasks:

diff --git a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
deleted file mode 100644
index 3164b43ee..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
+++ /dev/null
@@ -1,87 +0,0 @@
----
-- name: Evaluate additional groups for upgrade
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - name: Evaluate etcd_hosts_to_backup
-    add_host:
-      name: "{{ item }}"
-      groups: etcd_hosts_to_backup
-    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
-
-- name: Backup etcd
-  hosts: etcd_hosts_to_backup
-  vars:
-    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
-    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  # Ensure we persist the etcd role for this host in openshift_facts
-  - openshift_facts:
-      role: etcd
-      local_facts: {}
-    when: "'etcd' not in openshift"
-
-  - stat: path=/var/lib/openshift
-    register: var_lib_openshift
-
-  - stat: path=/var/lib/origin
-    register: var_lib_origin
-
-  - name: Create origin symlink if necessary
-    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
-    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
-  # TODO: replace shell module with command and update later checks
-  # We assume to be using the data dir for all backups.
-  - name: Check available disk space for etcd backup
-    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
-    register: avail_disk
-
-  # TODO: replace shell module with command and update later checks
-  - name: Check current embedded etcd disk usage
-    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
-    register: etcd_disk_usage
-    when: embedded_etcd | bool
-
-  - name: Abort if insufficient disk space for etcd backup
-    fail:
-      msg: >
-        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
-        {{ avail_disk.stdout }} Kb available.
-    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
-  - name: Install etcd (for etcdctl)
-    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
-    when: not openshift.common.is_atomic | bool
-
-  - name: Generate etcd backup
-    command: >
-      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
-      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
-  - set_fact:
-      etcd_backup_complete: True
-
-  - name: Display location of etcd backup
-    debug:
-      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-- name: Gate on etcd backup
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      etcd_backup_completed: "{{ hostvars
-                                 | oo_select_keys(groups.etcd_hosts_to_backup)
-                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
-  - set_fact:
-      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
-    when: etcd_backup_failed | length > 0
-
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 22d56f6d3..0063bdb2f 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,6 +2,92 @@
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
+- name: Evaluate additional groups for upgrade
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - name: Evaluate etcd_hosts_to_backup
+    add_host:
+      name: "{{ item }}"
+      groups: etcd_hosts_to_backup
+    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+- name: Backup etcd
+  hosts: etcd_hosts_to_backup
+  vars:
+    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+  roles:
+  - openshift_facts
+  tasks:
+  # Ensure we persist the etcd role for this host in openshift_facts
+  - openshift_facts:
+      role: etcd
+      local_facts: {}
+    when: "'etcd' not in openshift"
+
+  - stat: path=/var/lib/openshift
+    register: var_lib_openshift
+
+  - stat: path=/var/lib/origin
+    register: var_lib_origin
+
+  - name: Create origin symlink if necessary
+    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+  # TODO: replace shell module with command and update later checks
+  # We assume to be using the data dir for all backups.
+  - name: Check available disk space for etcd backup
+    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+    register: avail_disk
+
+  # TODO: replace shell module with command and update later checks
+  - name: Check current embedded etcd disk usage
+    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+    register: etcd_disk_usage
+    when: embedded_etcd | bool
+
+  - name: Abort if insufficient disk space for etcd backup
+    fail:
+      msg: >
+        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+        {{ avail_disk.stdout }} Kb available.
+    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+  - name: Install etcd (for etcdctl)
+    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+    when: not openshift.common.is_atomic | bool
+
+  - name: Generate etcd backup
+    command: >
+      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+  - set_fact:
+      etcd_backup_complete: True
+
+  - name: Display location of etcd backup
+    debug:
+      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+- name: Gate on etcd backup
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      etcd_backup_completed: "{{ hostvars
+                                 | oo_select_keys(groups.etcd_hosts_to_backup)
+                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+  - set_fact:
+      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+    when: etcd_backup_failed | length > 0
+
 - name: Upgrade master packages
   hosts: oo_masters_to_config
   handlers:
--
cgit v1.2.1


From de196a56aec48a6545d993dae9e739ad9ab511ba Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Wed, 28 Sep 2016 11:53:49 -0300
Subject: Use pre_upgrade tag instead of a dry run variable.

---
 .../openshift-cluster/upgrades/v3_3/upgrade.yml      | 29 +++++++++++++++----
 .../upgrades/v3_3/upgrade_control_plane.yml          | 27 ++++++++++++++----
 .../upgrades/v3_3/upgrade_nodes.yml                  | 33 +++++++++++++++++-----
 .../upgrades/docker/upgrade_check.yml                |  2 +-
 4 files changed, 71 insertions(+), 20 deletions(-)
(limited to 'playbooks')

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 6bd1aea07..7a3829283 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -3,9 +3,13 @@
 # Full Control Plane + Nodes Upgrade
 #
 - include: ../../../../common/openshift-cluster/upgrades/init.yml
+  tags:
+  - pre_upgrade

 # Configure the upgrade target for the common upgrade tasks:
 - hosts: l_oo_all_hosts
+  tags:
+  - pre_upgrade
   tasks:
   - set_fact:
       openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
@@ -14,14 +18,20 @@

 # Pre-upgrade
 - include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade

 - name: Update repos and initialize facts on all hosts
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
   roles:
   - openshift_repos

 - name: Set openshift_no_proxy_internal_hostnames
   hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
   tasks:
   - set_fact:
       openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
@@ -33,8 +43,12 @@
       openshift_generate_no_proxy_hosts | default(True) | bool }}"

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
   vars:
     # Request specific openshift_release and let the openshift_version role handle converting this
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
@@ -49,19 +63,22 @@
     skip_docker_role: True

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+  tags:
+  - pre_upgrade

-- name: Exit upgrade if dry-run specified
-  hosts: oo_all_hosts
-  tasks:
-  - fail:
-      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
-    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
 # Separate step so we can execute in parallel and clear out anything unused
 # before we get into the serialized upgrade process which will then remove

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index 35951ccda..d6af71827 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -12,9 +12,13 @@
 # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
 #
 - include: ../../../../common/openshift-cluster/upgrades/init.yml
+  tags:
+  - pre_upgrade

 # Configure the upgrade target for the common upgrade tasks:
 - hosts: l_oo_all_hosts
+  tags:
+  - pre_upgrade
   tasks:
   - set_fact:
       openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
@@ -24,11 +28,15 @@

 - name: Update repos on control plane hosts
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
   roles:
   - openshift_repos

 - name: Set openshift_no_proxy_internal_hostnames
   hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
   tasks:
   - set_fact:
       openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
@@ -40,8 +48,12 @@
       openshift_generate_no_proxy_hosts | default(True) | bool }}"

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
   vars:
     # Request specific openshift_release and let the openshift_version role handle converting this
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
@@ -56,19 +68,22 @@
     skip_docker_role: True

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+  tags:
+  - pre_upgrade

-- name: Exit upgrade if dry-run specified
-  hosts: oo_all_hosts
-  tasks:
-  - fail:
-      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
-    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.

 # Separate step so we can execute in parallel and clear out anything unused
 # before we get into the serialized upgrade process which will then remove

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 201bb5d16..e2a33cc00 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -5,9 +5,13 @@
 # Upgrades nodes only, but requires the control plane to have already been upgraded.
 #
 - include: ../../../../common/openshift-cluster/upgrades/init.yml
+  tags:
+  - pre_upgrade

 # Configure the upgrade target for the common upgrade tasks:
 - hosts: l_oo_all_hosts
+  tags:
+  - pre_upgrade
   tasks:
   - set_fact:
       openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
@@ -15,14 +19,20 @@

 # Pre-upgrade
 - include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade

 - name: Update repos on nodes
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
   roles:
   - openshift_repos
+  tags:
+  - pre_upgrade

 - name: Set openshift_no_proxy_internal_hostnames
   hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
   tasks:
   - set_fact:
       openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
@@ -34,8 +44,12 @@
       openshift_generate_no_proxy_hosts | default(True) | bool }}"

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
   vars:
     # Request specific openshift_release and let the openshift_version role handle converting this
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
@@ -51,24 +65,29 @@

 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config
+  tags:
+  - pre_upgrade
   tasks:
   - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
     when: openshift.common.version != openshift_version

-- name: Exit upgrade if dry-run specified
-  hosts: oo_all_hosts
-  tasks:
-  - fail:
-      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
-    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
-
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade

 - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.

 # Separate step so we can execute in parallel and clear out anything unused
 # before we get into the serialized upgrade process which will then remove

diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 8002af4fc..fc26d029e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,7 +1,7 @@
 ---

 # This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_version to True if so.
+# variables, the available packages, and sets l_docker_upgrade to True if so.

 - set_fact:
     docker_upgrade: True
--
cgit v1.2.1


From 9461cbf44d75c657ed400324b1cc2c39a2d6b9ff Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Thu, 29 Sep 2016 12:41:16 -0300
Subject: Fix bug with service signer cert on upgrade.

It is invalid Ansible to use a when on an include that contains plays, as it
cannot be applied to plays. Issue filed upstream for a better error, or to
get it working.
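A minimal sketch of the workaround (some_condition is an illustrative name,
not from this patch): a guarded include of a file containing plays, i.e.

  - include: create_service_signer_cert.yml
    when: some_condition | bool

is rejected by Ansible, so the condition is instead repeated on every task
inside the included plays:

  - name: Example task inside an included play
    command: mktemp -d /tmp/openshift-ansible-XXXXXXX
    # The per-task condition replaces the invalid play-level one.
    when: some_condition | bool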
---
 .../openshift-cluster/upgrades/create_service_signer_cert.yml   | 7 +++++++
 .../common/openshift-cluster/upgrades/upgrade_control_plane.yml | 1 -
 2 files changed, 7 insertions(+), 1 deletion(-)
(limited to 'playbooks')

diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index e8a20aa2b..78f6c46f3 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -9,6 +9,7 @@
     local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
     register: local_cert_sync_tmpdir
     changed_when: false
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)

 - name: Create service signer certificate
   hosts: oo_first_master
@@ -17,6 +18,7 @@
     command: mktemp -d /tmp/openshift-ansible-XXXXXXX
     register: remote_cert_create_tmpdir
     changed_when: false
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)

   - name: Create service signer certificate
     command: >
@@ -27,6 +29,7 @@
       --serial=service-signer.serial.txt
     args:
       chdir: "{{ remote_cert_create_tmpdir.stdout }}/"
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)

   - name: Retrieve service signer certificate
     fetch:
@@ -38,12 +41,14 @@
     with_items:
     - "service-signer.crt"
    - "service-signer.key"
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)

   - name: Delete remote temp directory
     file:
       name: "{{ remote_cert_create_tmpdir.stdout }}"
       state: absent
     changed_when: false
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)

 - name: Deploy service signer certificate
   hosts: oo_masters_to_config
@@ -55,6 +60,7 @@
     with_items:
     - "service-signer.crt"
     - "service-signer.key"
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)

 - name: Delete local temp directory
   hosts: localhost
@@ -67,3 +73,4 @@
     name: "{{ local_cert_sync_tmpdir.stdout }}"
     state: absent
     changed_when: false
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 0063bdb2f..2c641e21e 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -111,7 +111,6 @@
 # Create service signer cert when missing. Service signer certificate
 # is added to master config in the master config hook for v3_3.
 - include: create_service_signer_cert.yml
-  when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)

 - name: Upgrade master config and systemd units
   hosts: oo_masters_to_config
--
cgit v1.2.1
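With the playbooks fully tagged as in the series above, the old dry-run
behavior should be achievable through standard Ansible tag filtering, for
example (a sketch of expected usage; inventory and connection options
omitted):

  ansible-playbook playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml --tags pre_upgrade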