From 9dcc8fc7123e1f13e945a658ffe7331730b0105f Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Mon, 12 Sep 2016 15:50:32 -0300 Subject: Split upgrade for control plane/nodes. --- .../byo/openshift-cluster/upgrades/v3_3/roles | 1 + .../openshift-cluster/upgrades/v3_3/upgrade.yml | 30 ++- .../upgrades/v3_3/upgrade_control_plane.yml | 86 +++++++ .../upgrades/v3_3/upgrade_masters.yml | 58 ----- .../upgrades/v3_3/upgrade_nodes.yml | 35 ++- .../common/openshift-cluster/initialize_facts.yml | 2 + .../upgrades/cleanup_unused_images.yml | 22 ++ .../common/openshift-cluster/upgrades/init.yml | 2 +- .../common/openshift-cluster/upgrades/post.yml | 72 ------ .../upgrades/post_control_plane.yml | 72 ++++++ .../common/openshift-cluster/upgrades/pre.yml | 5 - .../openshift-cluster/upgrades/pre/backup_etcd.yml | 6 - .../common/openshift-cluster/upgrades/pre/roles | 1 + .../upgrades/pre/verify_docker_upgrade_targets.yml | 2 +- .../common/openshift-cluster/upgrades/upgrade.yml | 262 --------------------- .../upgrades/upgrade_control_plane.yml | 174 ++++++++++++++ .../openshift-cluster/upgrades/upgrade_nodes.yml | 60 +++++ 17 files changed, 469 insertions(+), 421 deletions(-) create mode 120000 playbooks/byo/openshift-cluster/upgrades/v3_3/roles create mode 100644 playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml delete mode 100644 playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml create mode 100644 playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml delete mode 100644 playbooks/common/openshift-cluster/upgrades/post.yml create mode 100644 playbooks/common/openshift-cluster/upgrades/post_control_plane.yml delete mode 100644 playbooks/common/openshift-cluster/upgrades/pre.yml create mode 120000 playbooks/common/openshift-cluster/upgrades/pre/roles delete mode 100644 playbooks/common/openshift-cluster/upgrades/upgrade.yml create mode 100644 playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml create mode 100644 playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml (limited to 'playbooks') diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles @@ -0,0 +1 @@ +../../../../../roles \ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index 5a95b5fdb..87a8ef66c 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -12,7 +12,7 @@ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" # Pre-upgrade -- include: ../initialize_facts.yml +- include: ../../../../common/openshift-cluster/initialize_facts.yml - name: Update repos and initialize facts on all hosts hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config @@ -33,7 +33,7 @@ - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml -- include: ../initialize_openshift_version.yml +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml vars: # Request specific openshift_release and let the openshift_version role handle converting this # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if @@ -45,7 +45,7 @@ # docker_version defined, we don't want to actually do it until later) 
docker_protect_installed_version: True -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -55,15 +55,29 @@ - include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml - # vars: - # openshift_deployment_type: "{{ deployment_type }}" +- name: Exit upgrade if dry-run specified + hosts: oo_all_hosts + tasks: + - fail: + msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable." + when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml -- include: ../../../../common/openshift-cluster/upgrades/upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml vars: - openshift_deployment_type: "{{ deployment_type }}" master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml + vars: node_config_hook: "v3_3/node_config_upgrade.yml" - include: ../../../openshift-master/restart.yml -- include: ../../../../common/openshift-cluster/upgrades/post.yml +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml new file mode 100644 index 000000000..bcc304141 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -0,0 +1,86 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + +# Configure the upgrade target for the common upgrade tasks: +- hosts: l_oo_all_hosts + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../../../../common/openshift-cluster/initialize_facts.yml + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_config + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + # Docker role (a dependency) should be told not to do anything to installed version + # of docker, we handle this separately during upgrade. (the inventory may have a + # docker_version defined, we don't want to actually do it until later) + docker_protect_installed_version: True + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + +- include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml + +- name: Exit upgrade if dry-run specified + hosts: oo_all_hosts + tasks: + - fail: + msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable." + when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml deleted file mode 100644 index 94339dd63..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and etcd. -# -- include: ../../../../common/openshift-cluster/upgrades/init.yml - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_facts.yml - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_config - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - -- include: ../initialize_openshift_version.yml - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - # Docker role (a dependency) should be told not to do anything to installed version - # of docker, we handle this separately during upgrade. 
(the inventory may have a - # docker_version defined, we don't want to actually do it until later) - docker_protect_installed_version: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - -- include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index 9d29ba1ab..e79df1a02 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -14,9 +14,9 @@ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" # Pre-upgrade -- include: ../initialize_facts.yml +- include: ../../../../common/openshift-cluster/initialize_facts.yml -- name: Update repos and initialize facts on all hosts +- name: Update repos on nodes hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config roles: - openshift_repos @@ -35,7 +35,7 @@ - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml -- include: ../initialize_openshift_version.yml +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml vars: # Request specific openshift_release and let the openshift_version role handle converting this # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if @@ -47,7 +47,20 @@ # docker_version defined, we don't want to actually do it until later) docker_protect_installed_version: True -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- name: Exit upgrade if dry-run specified + hosts: oo_all_hosts + tasks: + - fail: + msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable." + when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -55,8 +68,14 @@ - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml -- name: Verify masters are already upgraded - hosts: oo_masters_to_config +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_config tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
- when: openshift.common.version != openshift_version + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index 04dde632b..6d83d2527 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -11,3 +11,5 @@ hostname: "{{ openshift_hostname | default(None) }}" - set_fact: openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + - set_fact: + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml new file mode 100644 index 000000000..6e953be69 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml @@ -0,0 +1,22 @@ +--- +- name: Check Docker image count + shell: "docker images -aq | wc -l" + register: docker_image_count + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- debug: var=docker_image_count.stdout + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- name: Remove unused Docker images for Docker 1.10+ migration + shell: "docker rmi `docker images -aq`" + # Will fail on images still in use: + failed_when: false + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- name: Check Docker image count + shell: "docker images -aq | wc -l" + register: docker_image_count + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool + +- debug: var=docker_image_count.stdout + when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index f3bc70a72..03c4a3112 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -6,7 +6,7 @@ become: no gather_facts: no tasks: - - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml + - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml - add_host: name: "{{ item }}" groups: l_oo_all_hosts diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post.yml deleted file mode 100644 index e43954453..000000000 --- a/playbooks/common/openshift-cluster/upgrades/post.yml +++ /dev/null @@ -1,72 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade default router, default registry and examples -############################################################################### -- name: Upgrade default router and default registry - hosts: oo_first_master - vars: - openshift_deployment_type: "{{ deployment_type }}" - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}" - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}" - oc_cmd: "{{ openshift.common.client_binary }} --config={{ 
openshift.common.config_base }}/master/admin.kubeconfig" - roles: - - openshift_manageiq - # Create the new templates shipped in 3.2, existing templates are left - # unmodified. This prevents the subsequent role definition for - # openshift_examples from failing when trying to replace templates that do - # not already exist. We could have potentially done a replace --force to - # create and update in one step. - - openshift_examples - # Update the existing templates - - role: openshift_examples - registry_url: "{{ openshift.master.registry_url }}" - openshift_examples_import_command: replace - pre_tasks: - - name: Collect all routers - command: > - {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json - register: all_routers - failed_when: false - changed_when: false - - - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}" - when: all_routers.rc == 0 - - - set_fact: haproxy_routers=[] - when: all_routers.rc != 0 - - - name: Update router image to current version - when: all_routers.rc == 0 - command: > - {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p - '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' - --api-version=v1 - with_items: haproxy_routers - - - name: Check for default registry - command: > - {{ oc_cmd }} get -n default dc/docker-registry - register: _default_registry - failed_when: false - changed_when: false - - - name: Update registry image to current version - when: _default_registry.rc == 0 - command: > - {{ oc_cmd }} patch dc/docker-registry -n default -p - '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' - --api-version=v1 - -# Check for warnings to be printed at the end of the upgrade: -- name: Check for warnings - hosts: oo_masters_to_config - tasks: - # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond: - - command: > - grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml - register: grep_plugin_order_override - when: openshift.common.version_gte_3_3_or_1_3 | bool - failed_when: false - - name: Warn if pluginOrderOverride is in use in master-config.yaml - debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information." 
- when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0 diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml new file mode 100644 index 000000000..e43954453 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -0,0 +1,72 @@ +--- +############################################################################### +# Post upgrade - Upgrade default router, default registry and examples +############################################################################### +- name: Upgrade default router and default registry + hosts: oo_first_master + vars: + openshift_deployment_type: "{{ deployment_type }}" + registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}" + oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + roles: + - openshift_manageiq + # Create the new templates shipped in 3.2, existing templates are left + # unmodified. This prevents the subsequent role definition for + # openshift_examples from failing when trying to replace templates that do + # not already exist. We could have potentially done a replace --force to + # create and update in one step. + - openshift_examples + # Update the existing templates + - role: openshift_examples + registry_url: "{{ openshift.master.registry_url }}" + openshift_examples_import_command: replace + pre_tasks: + - name: Collect all routers + command: > + {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json + register: all_routers + failed_when: false + changed_when: false + + - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}" + when: all_routers.rc == 0 + + - set_fact: haproxy_routers=[] + when: all_routers.rc != 0 + + - name: Update router image to current version + when: all_routers.rc == 0 + command: > + {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' + --api-version=v1 + with_items: haproxy_routers + + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -n default -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + --api-version=v1 + +# Check for warnings to be printed at the end of the upgrade: +- name: Check for warnings + hosts: oo_masters_to_config + tasks: + # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond: + - command: > + grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml + register: grep_plugin_order_override + when: 
openshift.common.version_gte_3_3_or_1_3 | bool + failed_when: false + - name: Warn if pluginOrderOverride is in use in master-config.yaml + debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information." + when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0 diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml deleted file mode 100644 index a2d231c59..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -############################################################################### -# Backup etcd -############################################################################### - diff --git a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml index 994ac2bb9..3164b43ee 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml @@ -85,9 +85,3 @@ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" when: etcd_backup_failed | length > 0 -- name: Exit upgrade if dry-run specified - hosts: oo_first_master - tasks: - - fail: - msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable." - when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/roles b/playbooks/common/openshift-cluster/upgrades/pre/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/roles @@ -0,0 +1 @@ +../../../../../roles/ \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml index 635172de9..d8b282b41 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml @@ -4,7 +4,7 @@ tasks: # Only check if docker upgrade is required if docker_upgrade is not # already set to False. - - include: docker/upgrade_check.yml + - include: ../docker/upgrade_check.yml when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool # Additional checks for Atomic hosts: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml deleted file mode 100644 index c4ce5fef6..000000000 --- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml +++ /dev/null @@ -1,262 +0,0 @@ ---- -############################################################################### -# The restart playbook should be run after this playbook completes. -############################################################################### - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config - tasks: - - name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - - - debug: var=docker_image_count.stdout - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - - - name: Remove unused Docker images for Docker 1.10+ migration - shell: "docker rmi `docker images -aq`" - # Will fail on images still in use: - failed_when: false - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - - - name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - - - debug: var=docker_image_count.stdout - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -############################################################################### -# Upgrade Masters -############################################################################### -- name: Upgrade master packages - hosts: oo_masters_to_config - handlers: - - include: ../../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - tasks: - - include: rpm_upgrade.yml component=master - when: not openshift.common.is_containerized | bool - -- name: Determine if service signer cert must be created - hosts: oo_first_master - tasks: - - name: Determine if service signer certificate must be created - stat: - path: "{{ openshift.common.config_base }}/master/service-signer.crt" - register: service_signer_cert_stat - changed_when: false - -# Create service signer cert when missing. Service signer certificate -# is added to master config in the master config hook for v3_3. 
-- include: create_service_signer_cert.yml - when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool) - -- name: Upgrade master config and systemd units - hosts: oo_masters_to_config - handlers: - - include: ../../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - tasks: - - include: "{{ master_config_hook }}" - when: master_config_hook is defined - - - include_vars: ../../../../roles/openshift_master/vars/main.yml - - - name: Update systemd units - include: ../../../../roles/openshift_master/tasks/systemd_units.yml - -# - name: Upgrade master configuration -# openshift_upgrade_config: -# from_version: '3.1' -# to_version: '3.2' -# role: master -# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" - - - name: Check for ca-bundle.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - register: ca_bundle_stat - failed_when: false - - - name: Check for ca.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca.crt" - register: ca_crt_stat - failed_when: false - - - name: Migrate ca.crt to ca-bundle.crt - command: mv ca.crt ca-bundle.crt - args: - chdir: "{{ openshift.common.config_base }}/master" - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Link ca.crt to ca-bundle.crt - file: - src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - path: "{{ openshift.common.config_base }}/master/ca.crt" - state: link - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - -- name: Set master update status to complete - hosts: oo_masters_to_config - tasks: - - set_fact: - master_update_complete: True - -############################################################################## -# Gate on master update complete -############################################################################## -- name: Gate on master update - hosts: localhost - connection: local - become: no - tasks: - - set_fact: - master_update_completed: "{{ hostvars - | oo_select_keys(groups.oo_masters_to_config) - | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" - - set_fact: - master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" - - fail: - msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" - when: master_update_failed | length > 0 - -############################################################################### -# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints -############################################################################### - -- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints - hosts: oo_masters_to_config - roles: - - { role: openshift_cli } - vars: - origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}" - ent_reconcile_bindings: true - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role, - # it will be updated when we perform node upgrade. 
- docker_protect_installed_version: True - tasks: - - name: Verifying the correct commandline tools are available - shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} - when: openshift.common.is_containerized | bool and verify_upgrade_version is defined - - - name: Reconcile Cluster Roles - command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-roles --additive-only=true --confirm - run_once: true - - - name: Reconcile Cluster Role Bindings - command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-role-bindings - --exclude-groups=system:authenticated - --exclude-groups=system:authenticated:oauth - --exclude-groups=system:unauthenticated - --exclude-users=system:anonymous - --additive-only=true --confirm - when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool - run_once: true - - - name: Reconcile Security Context Constraints - command: > - {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true - run_once: true - - - set_fact: - reconcile_complete: True - -############################################################################## -# Gate on reconcile -############################################################################## -- name: Gate on reconcile - hosts: localhost - connection: local - become: no - tasks: - - set_fact: - reconcile_completed: "{{ hostvars - | oo_select_keys(groups.oo_masters_to_config) - | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" - - set_fact: - reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" - - fail: - msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" - when: reconcile_failed | length > 0 - -############################################################################### -# Upgrade Nodes -############################################################################### - -# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service) -- name: Perform upgrades that may require node evacuation - hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config - serial: 1 - any_errors_fatal: true - roles: - - openshift_facts - handlers: - - include: ../../../../roles/openshift_node/handlers/main.yml - static: yes - tasks: - # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node - # or docker actually needs an upgrade before proceeding. Perhaps best to save this until - # we merge upgrade functionality into the base roles and a normal config.yml playbook run. 
- - name: Determine if node is currently scheduleable - command: > - {{ openshift.common.client_binary }} get node {{ openshift.node.nodename }} -o json - register: node_output - delegate_to: "{{ groups.oo_first_master.0 }}" - changed_when: false - when: inventory_hostname in groups.oo_nodes_to_config - - - set_fact: - was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}" - when: inventory_hostname in groups.oo_nodes_to_config - - - name: Mark unschedulable if host is a node - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config - - - name: Evacuate Node for Kubelet upgrade - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config - - - include: docker/upgrade.yml - when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool - - include: "{{ node_config_hook }}" - when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config - - - include: rpm_upgrade.yml - vars: - component: "node" - openshift_version: "{{ openshift_pkg_version | default('') }}" - when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool - - - include: containerized_node_upgrade.yml - when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool - - - meta: flush_handlers - - - name: Set node schedulability - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool - - diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml new file mode 100644 index 000000000..5d74e0d10 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -0,0 +1,174 @@ +--- +############################################################################### +# Upgrade Masters +############################################################################### +- name: Upgrade master packages + hosts: oo_masters_to_config + handlers: + - include: ../../../../roles/openshift_master/handlers/main.yml + static: yes + roles: + - openshift_facts + tasks: + - include: rpm_upgrade.yml component=master + when: not openshift.common.is_containerized | bool + +- name: Determine if service signer cert must be created + hosts: oo_first_master + tasks: + - name: Determine if service signer certificate must be created + stat: + path: "{{ openshift.common.config_base }}/master/service-signer.crt" + register: service_signer_cert_stat + changed_when: false + +# Create service signer cert when missing. Service signer certificate +# is added to master config in the master config hook for v3_3. 
+- include: create_service_signer_cert.yml + when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool) + +- name: Upgrade master config and systemd units + hosts: oo_masters_to_config + handlers: + - include: ../../../../roles/openshift_master/handlers/main.yml + static: yes + roles: + - openshift_facts + tasks: + - include: "{{ master_config_hook }}" + when: master_config_hook is defined + + - include_vars: ../../../../roles/openshift_master/vars/main.yml + + - name: Update systemd units + include: ../../../../roles/openshift_master/tasks/systemd_units.yml + +# - name: Upgrade master configuration +# openshift_upgrade_config: +# from_version: '3.1' +# to_version: '3.2' +# role: master +# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" + + - name: Check for ca-bundle.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + register: ca_bundle_stat + failed_when: false + + - name: Check for ca.crt + stat: + path: "{{ openshift.common.config_base }}/master/ca.crt" + register: ca_crt_stat + failed_when: false + + - name: Migrate ca.crt to ca-bundle.crt + command: mv ca.crt ca-bundle.crt + args: + chdir: "{{ openshift.common.config_base }}/master" + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + + - name: Link ca.crt to ca-bundle.crt + file: + src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + path: "{{ openshift.common.config_base }}/master/ca.crt" + state: link + when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + +- name: Set master update status to complete + hosts: oo_masters_to_config + tasks: + - set_fact: + master_update_complete: True + +############################################################################## +# Gate on master update complete +############################################################################## +- name: Gate on master update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + master_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" + - set_fact: + master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" + when: master_update_failed | length > 0 + +############################################################################### +# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints +############################################################################### + +- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints + hosts: oo_masters_to_config + roles: + - { role: openshift_cli } + vars: + origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}" + ent_reconcile_bindings: true + openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role, + # it will be updated when we perform node upgrade. 
+ docker_protect_installed_version: True + tasks: + - name: Verifying the correct commandline tools are available + shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} + when: openshift.common.is_containerized | bool and verify_upgrade_version is defined + + - name: Reconcile Cluster Roles + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --additive-only=true --confirm + run_once: true + + - name: Reconcile Cluster Role Bindings + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:authenticated:oauth + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + run_once: true + + - name: Reconcile Security Context Constraints + command: > + {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true + run_once: true + + - set_fact: + reconcile_complete: True + +############################################################################## +# Gate on reconcile +############################################################################## +- name: Gate on reconcile + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + reconcile_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" + - set_fact: + reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" + when: reconcile_failed | length > 0 + +- name: Upgrade Docker on dedicated containerized etcd hosts + hosts: oo_etcd_to_config:!oo_nodes_to_config + serial: 1 + any_errors_fatal: true + roles: + - openshift_facts + tasks: + - include: docker/upgrade.yml + when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml new file mode 100644 index 000000000..0ab8ba23c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -0,0 +1,60 @@ +--- +- name: Evacuate and upgrade nodes + hosts: oo_nodes_to_config + serial: 1 + any_errors_fatal: true + roles: + - openshift_facts + handlers: + - include: ../../../../roles/openshift_node/handlers/main.yml + static: yes + tasks: + # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node + # or docker actually needs an upgrade before proceeding. Perhaps best to save this until + # we merge upgrade functionality into the base roles and a normal config.yml playbook run. 
+ - name: Determine if node is currently scheduleable + command: > + {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json + register: node_output + delegate_to: "{{ groups.oo_first_master.0 }}" + changed_when: false + when: inventory_hostname in groups.oo_nodes_to_config + + - set_fact: + was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}" + when: inventory_hostname in groups.oo_nodes_to_config + + - name: Mark unschedulable if host is a node + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false + delegate_to: "{{ groups.oo_first_master.0 }}" + when: inventory_hostname in groups.oo_nodes_to_config + + - name: Evacuate Node for Kubelet upgrade + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force + delegate_to: "{{ groups.oo_first_master.0 }}" + when: inventory_hostname in groups.oo_nodes_to_config + + - include: docker/upgrade.yml + when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool + + - include: "{{ node_config_hook }}" + when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config + + - include: rpm_upgrade.yml + vars: + component: "node" + openshift_version: "{{ openshift_pkg_version | default('') }}" + when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool + + - include: containerized_node_upgrade.yml + when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool + + - meta: flush_handlers + + - name: Set node schedulability + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true + delegate_to: "{{ groups.oo_first_master.0 }}" + when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool -- cgit v1.2.1
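
Usage sketch (illustrative, not part of the patch): with this split, a BYO cluster can still be upgraded in one pass with upgrade.yml, or in two passes by running the control plane playbook first and the node playbook afterwards. The playbook paths, the openshift_upgrade_dry_run variable, and the docker_upgrade_nuke_images variable are the ones introduced above; the inventory path is a placeholder.

    # All-in-one upgrade (control plane followed by nodes), as before:
    ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml

    # Control plane only: masters, etcd-host Docker, and the default router/registry
    # are updated; node services and Docker on dedicated nodes are left untouched:
    ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml

    # Nodes only: fails early unless the masters already run the target version:
    ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml

    # Run only the pre-upgrade checks and etcd backup, then exit before upgrading:
    ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml \
      -e openshift_upgrade_dry_run=true

    # Optionally remove unused images during the parallel cleanup step
    # (cleanup_unused_images.yml only acts when this variable is set):
    ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml \
      -e docker_upgrade_nuke_images=true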