author    Devan Goodwin <dgoodwin@redhat.com>  2016-09-12 15:50:32 -0300
committer Devan Goodwin <dgoodwin@redhat.com>  2016-09-29 10:25:58 -0300
commit    9dcc8fc7123e1f13e945a658ffe7331730b0105f
tree      3bd75059ff30727706a82c43ac6378a34a03e114
parent    6f056fd9673428c00b5e496a9a084cf09ad777cf
Split upgrade for control plane/nodes.
Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml                           |  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml             | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml                              |  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/post.yml) |  0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre.yml                               |  5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml                   |  6
l---------  playbooks/common/openshift-cluster/upgrades/pre/roles                             |  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml |  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/upgrade.yml) | 92
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml                     | 60
10 files changed, 89 insertions(+), 103 deletions(-)
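
The commit splits the old single-pass upgrade.yml into separate control-plane and node phases. As a rough sketch, a wrapper entry point might chain the resulting files in this order (the wrapper itself is hypothetical and not part of this commit; only the included file paths come from the diffstat above):

---
# Hypothetical wrapper playbook: shared init, then control plane, then nodes.
- include: common/openshift-cluster/upgrades/init.yml
- include: common/openshift-cluster/upgrades/upgrade_control_plane.yml
- include: common/openshift-cluster/upgrades/upgrade_nodes.yml
# Post-upgrade control plane steps (renamed from post.yml in this commit):
- include: common/openshift-cluster/upgrades/post_control_plane.yml
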
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 04dde632b..6d83d2527 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -11,3 +11,5 @@
         hostname: "{{ openshift_hostname | default(None) }}"
   - set_fact:
       openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+  - set_fact:
+      openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
new file mode 100644
index 000000000..6e953be69
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -0,0 +1,22 @@
+---
+- name: Check Docker image count
+  shell: "docker images -aq | wc -l"
+  register: docker_image_count
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Remove unused Docker images for Docker 1.10+ migration
+  shell: "docker rmi `docker images -aq`"
+  # Will fail on images still in use:
+  failed_when: false
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Check Docker image count
+  shell: "docker images -aq | wc -l"
+  register: docker_image_count
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
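
No caller for this new file appears in these hunks; a plausible invoking play, mirroring the inline block this commit removes from upgrade.yml further down (host groups copied from that removed block):

- name: Cleanup unused Docker images
  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
  tasks:
  - include: cleanup_unused_images.yml
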
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index f3bc70a72..03c4a3112 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -6,7 +6,7 @@
   become: no
   gather_facts: no
   tasks:
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+  - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
   - add_host:
       name: "{{ item }}"
       groups: l_oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index e43954453..e43954453 100644
--- a/playbooks/common/openshift-cluster/upgrades/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
deleted file mode 100644
index a2d231c59..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-###############################################################################
-# Backup etcd
-###############################################################################
-
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
index 994ac2bb9..3164b43ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
@@ -85,9 +85,3 @@
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
-- name: Exit upgrade if dry-run specified
- hosts: oo_first_master
- tasks:
- - fail:
- msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
- when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/roles b/playbooks/common/openshift-cluster/upgrades/pre/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
index 635172de9..d8b282b41 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -4,7 +4,7 @@
   tasks:
   # Only check if docker upgrade is required if docker_upgrade is not
   # already set to False.
-  - include: docker/upgrade_check.yml
+  - include: ../docker/upgrade_check.yml
     when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
 
   # Additional checks for Atomic hosts:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c4ce5fef6..5d74e0d10 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -1,37 +1,5 @@
 ---
 ###############################################################################
-# The restart playbook should be run after this playbook completes.
-###############################################################################
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
-  tasks:
-  - name: Check Docker image count
-    shell: "docker images -aq | wc -l"
-    register: docker_image_count
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-  - debug: var=docker_image_count.stdout
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-  - name: Remove unused Docker images for Docker 1.10+ migration
-    shell: "docker rmi `docker images -aq`"
-    # Will fail on images still in use:
-    failed_when: false
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-  - name: Check Docker image count
-    shell: "docker images -aq | wc -l"
-    register: docker_image_count
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-  - debug: var=docker_image_count.stdout
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-###############################################################################
 # Upgrade Masters
 ###############################################################################
 - name: Upgrade master packages
@@ -195,68 +163,12 @@
msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
when: reconcile_failed | length > 0
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-
-# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service)
-- name: Perform upgrades that may require node evacuation
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
+- name: Upgrade Docker on dedicated containerized etcd hosts
+ hosts: oo_etcd_to_config:!oo_nodes_to_config
serial: 1
any_errors_fatal: true
roles:
- openshift_facts
- handlers:
- - include: ../../../../roles/openshift_node/handlers/main.yml
- static: yes
tasks:
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ openshift.common.client_binary }} get node {{ openshift.node.nodename }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - name: Mark unschedulable if host is a node
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - name: Evacuate Node for Kubelet upgrade
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- include: docker/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- - include: "{{ node_config_hook }}"
- when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
-
- - include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
-
- - include: containerized_node_upgrade.yml
- when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
-
- - meta: flush_handlers
-
- - name: Set node schedulability
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool
-
-
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
new file mode 100644
index 000000000..0ab8ba23c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -0,0 +1,60 @@
+---
+- name: Evacuate and upgrade nodes
+  hosts: oo_nodes_to_config
+  serial: 1
+  any_errors_fatal: true
+  roles:
+  - openshift_facts
+  handlers:
+  - include: ../../../../roles/openshift_node/handlers/main.yml
+    static: yes
+  tasks:
+  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+  - name: Determine if node is currently scheduleable
+    command: >
+      {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+    register: node_output
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    changed_when: false
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - set_fact:
+      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - name: Mark unschedulable if host is a node
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - name: Evacuate Node for Kubelet upgrade
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - include: docker/upgrade.yml
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+  - include: "{{ node_config_hook }}"
+    when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
+
+  - include: rpm_upgrade.yml
+    vars:
+      component: "node"
+      openshift_version: "{{ openshift_pkg_version | default('') }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
+
+  - include: containerized_node_upgrade.yml
+    when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
+
+  - meta: flush_handlers
+
+  - name: Set node schedulability
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool