---
###############################################################################
# Upgrade Masters
###############################################################################

# oc adm migrate storage should be run prior to the etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
- name: Pre master upgrade - Upgrade all storage
  hosts: oo_first_master
  tasks:
  - name: Upgrade all storage
    command: >
      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      migrate storage --include=* --confirm
    register: l_pb_upgrade_control_plane_pre_upgrade_storage
    when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
    failed_when:
    - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
    - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
    - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool

# If the facts cache was for some reason deleted, this fact may not be set,
# and when unset it always defaults to true. That breaks etcd data dir fact
# detection, so we must make sure it is set correctly before attempting the backup.
- name: Set master embedded_etcd fact
  hosts: oo_masters_to_config
  roles:
  - openshift_facts
  tasks:
  - openshift_facts:
      role: master
      local_facts:
        embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
        debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"

- name: Upgrade and backup etcd
  include: ./etcd/main.yml

# Create the service signer cert when missing. The service signer certificate
# is added to the master config in the master_config_upgrade hook.
- name: Determine if service signer cert must be created
  hosts: oo_first_master
  tasks:
  - name: Determine if service signer certificate must be created
    stat:
      path: "{{ openshift.common.config_base }}/master/service-signer.crt"
    register: service_signer_cert_stat
    changed_when: false

- include: create_service_signer_cert.yml

# Set openshift_master_facts separately. In order to reconcile
# admission configs, we currently must run openshift_master_facts and
# then run openshift_facts.
- name: Set OpenShift master facts
  hosts: oo_masters_to_config
  roles:
  - openshift_master_facts

# The main master upgrade play. It should handle all changes to the system in
# one pass, with support for optional hooks to be defined.
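#
# Hooks are plain task files referenced by inventory variables and included at
# the marked points in the play below. A minimal sketch of a pre-upgrade hook
# (the file path and task are illustrative, not shipped with these playbooks):
#
#   # /usr/share/custom/pre_master.yml
#   ---
#   - name: Note which master is about to be upgraded
#     debug:
#       msg: "Upgrading master {{ inventory_hostname }}"
#
# Wired up via inventory or -e:
#
#   openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml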
- name: Upgrade master
  hosts: oo_masters_to_config
  vars:
    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
  serial: 1
  handlers:
  - include: ../../../../roles/openshift_master/handlers/main.yml
    static: yes
  roles:
  - openshift_facts
  - lib_utils
  post_tasks:

  # Run the pre-upgrade hook if defined:
  - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
    when: openshift_master_upgrade_pre_hook is defined

  - include: "{{ openshift_master_upgrade_pre_hook }}"
    when: openshift_master_upgrade_pre_hook is defined

  - include: rpm_upgrade.yml component=master
    when: not openshift.common.is_containerized | bool

  - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml

  - include: upgrade_scheduler.yml

  - include: "{{ master_config_hook }}"
    when: master_config_hook is defined

  - include_vars: ../../../../roles/openshift_master/vars/main.yml

  - name: Remove any legacy systemd units
    include: ../../../../roles/openshift_master/tasks/clean_systemd_units.yml

  - name: Update systemd units
    include: ../../../../roles/openshift_master/tasks/systemd_units.yml

  - name: Check for ca-bundle.crt
    stat:
      path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
    register: ca_bundle_stat
    failed_when: false

  - name: Check for ca.crt
    stat:
      path: "{{ openshift.common.config_base }}/master/ca.crt"
    register: ca_crt_stat
    failed_when: false

  - name: Migrate ca.crt to ca-bundle.crt
    command: mv ca.crt ca-bundle.crt
    args:
      chdir: "{{ openshift.common.config_base }}/master"
    when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists

  - name: Link ca.crt to ca-bundle.crt
    file:
      src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
      path: "{{ openshift.common.config_base }}/master/ca.crt"
      state: link
    when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists

  - name: Update oreg value
    yedit:
      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
      key: 'imageConfig.format'
      value: "{{ oreg_url | default(oreg_url_master) }}"
    when: oreg_url is defined or oreg_url_master is defined

  # Run the upgrade hook prior to restarting services/system if defined:
  - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
    when: openshift_master_upgrade_hook is defined

  - include: "{{ openshift_master_upgrade_hook }}"
    when: openshift_master_upgrade_hook is defined

  - include: ../../openshift-master/restart_hosts.yml
    when: openshift.common.rolling_restart_mode == 'system'

  - include: ../../openshift-master/restart_services.yml
    when: openshift.common.rolling_restart_mode == 'services'

  # Run the post-upgrade hook if defined:
  - debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
    when: openshift_master_upgrade_post_hook is defined

  - include: "{{ openshift_master_upgrade_post_hook }}"
    when: openshift_master_upgrade_post_hook is defined

  - name: Post master upgrade - Upgrade clusterpolicies storage
    command: >
      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      migrate storage --include=clusterpolicies --confirm
    register: l_pb_upgrade_control_plane_post_upgrade_storage
    when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
    failed_when:
    - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
    - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
    - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool
    run_once: true
    delegate_to: "{{ groups.oo_first_master.0 }}"

  - set_fact:
      master_update_complete: True
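# Each master sets master_update_complete only if every task above succeeded.
# The gate play below runs on localhost and uses the oo_select_keys /
# oo_collect filter plugins to collect that fact from all masters; any host
# missing it is reported and the upgrade aborts before reconciliation starts.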
##############################################################################
# Gate on master update complete
##############################################################################
- name: Gate on master update
  hosts: localhost
  connection: local
  become: no
  tasks:
  - set_fact:
      master_update_completed: "{{ hostvars
                                 | oo_select_keys(groups.oo_masters_to_config)
                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
  - set_fact:
      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
  - fail:
      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
    when: master_update_failed | length > 0

###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################

- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
  hosts: oo_masters_to_config
  roles:
  - { role: openshift_cli }
  vars:
    origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
    ent_reconcile_bindings: true
    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
    # Another spot where we assume docker is running and do not want to
    # accidentally trigger an unsafe restart.
    skip_docker_role: True
  tasks:
  - name: Reconcile Cluster Roles
    command: >
      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      policy reconcile-cluster-roles --additive-only=true --confirm -o name
    register: reconcile_cluster_role_result
    changed_when:
    - reconcile_cluster_role_result.stdout != ''
    - reconcile_cluster_role_result.rc == 0
    run_once: true

  - name: Reconcile Cluster Role Bindings
    command: >
      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      policy reconcile-cluster-role-bindings
      --exclude-groups=system:authenticated
      --exclude-groups=system:authenticated:oauth
      --exclude-groups=system:unauthenticated
      --exclude-users=system:anonymous
      --additive-only=true --confirm
      -o name
    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
    register: reconcile_bindings_result
    changed_when:
    - reconcile_bindings_result.stdout != ''
    - reconcile_bindings_result.rc == 0
    run_once: true

  - name: Reconcile Jenkins Pipeline Role Bindings
    command: >
      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
    run_once: true
    register: reconcile_jenkins_role_binding_result
    changed_when:
    - reconcile_jenkins_role_binding_result.stdout != ''
    - reconcile_jenkins_role_binding_result.rc == 0
    when: openshift.common.version_gte_3_4_or_1_4 | bool

  - name: Reconcile Security Context Constraints
    command: >
      {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      reconcile-sccs --confirm --additive-only=true -o name
    register: reconcile_scc_result
    changed_when:
    - reconcile_scc_result.stdout != ''
    - reconcile_scc_result.rc == 0
    run_once: true

  - name: Migrate storage post policy reconciliation
    command: >
      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      migrate storage --include=* --confirm
    run_once: true
    register: l_pb_upgrade_control_plane_post_upgrade_storage
    when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
    failed_when:
    - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
    - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
    - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool

  - set_fact:
      reconcile_complete: True

##############################################################################
# Gate on reconcile
##############################################################################
- name: Gate on reconcile
  hosts: localhost
  connection: local
  become: no
  tasks:
  - set_fact:
      reconcile_completed: "{{ hostvars
                             | oo_select_keys(groups.oo_masters_to_config)
                             | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
  - set_fact:
      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
  - fail:
      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
    when: reconcile_failed | length > 0

- name: Upgrade Docker on dedicated containerized etcd hosts
  hosts: oo_etcd_to_config:!oo_nodes_to_upgrade
  serial: 1
  any_errors_fatal: true
  roles:
  - openshift_facts
  tasks:
  - include: docker/upgrade.yml
    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool

- name: Drain and upgrade master nodes
  hosts: oo_masters_to_config:&oo_nodes_to_upgrade
  # This var must be set with -e on invocation, as it is not a per-host
  # inventory var and is evaluated early. Values such as "20%" can also be used.
  serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}"
  max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}"
  pre_tasks:
  - name: Load lib_openshift modules
    include_role:
      name: lib_openshift

  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
  - name: Mark node unschedulable
    oc_adm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: False
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_unschedulable
    until: node_unschedulable|succeeded

  - name: Drain Node for Kubelet upgrade
    command: >
      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }}
      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      --force --delete-local-data --ignore-daemonsets
    delegate_to: "{{ groups.oo_first_master.0 }}"
    register: l_upgrade_control_plane_drain_result
    until: not l_upgrade_control_plane_drain_result | failed
    retries: 60
    delay: 60

  roles:
  - lib_openshift
  - openshift_facts
  - docker
  - openshift_node_dnsmasq
  - openshift_node_upgrade

  post_tasks:
  - name: Set node schedulability
    oc_adm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: True
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_schedulable
    until: node_schedulable|succeeded
    when: node_unschedulable|changed
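# serial and max_fail_percentage above are play keywords evaluated before
# per-host inventory vars are available, hence the -e requirement noted in the
# play. An illustrative invocation (playbook path and values are examples only):
#
#   ansible-playbook <path-to>/upgrade_control_plane.yml \
#     -e openshift_upgrade_control_plane_nodes_serial="20%" \
#     -e openshift_upgrade_control_plane_nodes_max_fail_percentage=10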