author     Scott Dodson <sdodson@redhat.com>  2017-12-01 15:28:49 -0500
committer  GitHub <noreply@github.com>        2017-12-01 15:28:49 -0500
commit     7adf84ed20d2e5feb9b728f7047017179b6dfcee (patch)
tree       02f002872c2457e7ca4667a2103f7e4ebbcd4869 /playbooks
parent     e0e10698184c9a7cf4bf65787771686e46d26603 (diff)
parent     6154f7d49847813dfdea9ad73aaaed86f18aa9de (diff)
Merge pull request #6069 from kwoodson/upgrade_scale_groups
Initial upgrade for scale groups.
Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml |  7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml    | 59
2 files changed, 66 insertions, 0 deletions
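
The byo playbook added here is the entry point; its batching behavior is controlled by two extra-vars referenced in the common playbook below. A hypothetical extra-vars file (the variable names come from this diff; the values are only examples) might look like:

    # upgrade_vars.yml -- example values only; both variables have defaults
    # in the playbook below (serial: 1, max_fail_percentage: 0).
    openshift_upgrade_nodes_serial: "20%"            # drain/upgrade 20% of nodes per batch
    openshift_upgrade_nodes_max_fail_percentage: 10  # tolerate up to 10% failures per batch

These would be passed on the command line (e.g. ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml -e @upgrade_vars.yml), since serial is evaluated before per-host inventory vars are loaded, as the comment in the play itself notes.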
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
new file mode 100644
index 000000000..14b0f85d4
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Scale Group Upgrade Playbook
+#
+# Upgrades scale group nodes only.
+#
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
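
The wrapper above simply delegates to the common implementation that follows. One detail worth noting there: the first play fails fast unless the openshift_aws role has populated an oo_sg_new_nodes inventory group. The role's tasks are not part of this diff; a hypothetical sketch of how a task file could feed such a group (the add_host module is real Ansible; the instance list variable and field name are assumptions for illustration only):

    - name: add newly provisioned scale group instances to the upgrade group
      add_host:
        name: "{{ item.public_ip_address }}"            # assumed field name, illustrative only
        groups: oo_sg_new_nodes
      with_items: "{{ new_scale_group_instances }}"     # assumed variable from provisioning

The guard then only lets the drain-and-upgrade play run once replacement capacity actually exists.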
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
new file mode 100644
index 000000000..d9ce3a7e3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -0,0 +1,59 @@
+---
+- name: create new scale group
+ hosts: localhost
+ tasks:
+ - name: build upgrade scale groups
+ include_role:
+ name: openshift_aws
+ tasks_from: upgrade_node_group.yml
+
+ - fail:
+ msg: "Ensure that new scale groups were provisioned before proceeding to update."
+ when:
+ - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+
+- name: initialize upgrade bits
+ include: init.yml
+
+- name: Drain and upgrade nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+
+ pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: ../roles/lib_openshift
+
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+ # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+ - name: Mark node unschedulable
+ oc_adm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
+
+ - name: Drain Node for Kubelet upgrade
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
+
+# Alright, let's clean up!
+- name: clean up the old scale group
+ hosts: localhost
+ tasks:
+ - name: clean up scale group
+ include_role:
+ name: openshift_aws
+ tasks_from: remove_scale_group.yml
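
The drain play above cordons each old node (schedulable: False) and evicts its pods via the admin binary's drain command before the final play deletes the old scale group. Nothing in this diff un-cordons a node if a run is aborted mid-batch; a hypothetical recovery task, reusing the same lib_openshift module and parameters seen above, could be:

    # Hypothetical recovery snippet (not in this diff): re-enable scheduling
    # on a node that was cordoned by an aborted upgrade batch.
    - name: Mark node schedulable again
      oc_adm_manage_node:
        node: "{{ openshift.node.nodename | lower }}"
        schedulable: True
      delegate_to: "{{ groups.oo_first_master.0 }}"

In the normal flow this is unnecessary, since drained nodes belong to the old scale group and are removed outright by remove_scale_group.yml.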