author     OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>   2018-02-13 11:00:57 -0800
committer  GitHub <noreply@github.com>   2018-02-13 11:00:57 -0800
commit     b1cca03835322c1fb325834112b4a002229dc00e (patch)
tree       8367cd07e5fbd954ca9ea174d26b90554a106497 /roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml
parent     4d1e757b1e84400f8049b91afc6b9fba5d5d1d9b (diff)
parent     2861f28c05a10f0b85b019e36e6a61c4a6dbf442 (diff)
Merge pull request #6918 from SaravanaStorageNetwork/uninstall_playbook
Automatic merge from submit-queue. Uninstall playbook for GlusterFS.
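For context, a minimal sketch of how this uninstall tasks file might be driven from a playbook is shown below. The play is illustrative only: the host group, the include mechanism, and the openshift_storage_glusterfs_wipe inventory variable are assumptions based on the role's existing conventions, not the exact entry point merged by this PR.

- name: Uninstall GlusterFS storage (illustrative sketch, not the merged playbook)
  hosts: oo_first_master
  tasks:
  - name: Run the role's uninstall tasks
    include_role:
      name: openshift_storage_glusterfs
      tasks_from: glusterfs_uninstall.yml
    vars:
      # Hypothetical pass-through; the real entry point may set this differently.
      glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe | default(False) }}"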
Diffstat (limited to 'roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml')
-rw-r--r--   roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml   116
1 file changed, 116 insertions, 0 deletions
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml
new file mode 100644
index 000000000..a5774cc75
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml
@@ -0,0 +1,116 @@
+---
+
+- name: Delete pre-existing heketi resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,dc,jobs,secret"
+ selector: "deploy-heketi"
+ - kind: "svc"
+ name: "heketi-storage-endpoints"
+ - kind: "svc"
+ name: "heketi-storage"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ - kind: "template,route,service,dc"
+ name: "heketi-{{ glusterfs_name }}"
+ - kind: "svc"
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ - kind: "sa"
+ name: "heketi-{{ glusterfs_name }}-service-account"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-config-secret"
+ failed_when: False
+
+- name: Delete pre-existing GlusterFS resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name }}"
+ state: absent
+ with_items:
+ - kind: template
+ name: glusterfs
+ - kind: daemonset
+ name: "glusterfs-{{ glusterfs_name }}"
+ - kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}"
+
+- name: Unlabel any existing GlusterFS nodes
+ oc_label:
+ name: "{{ hostvars[item].openshift.node.nodename }}"
+ kind: node
+ state: absent
+ labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
+ with_items: "{{ groups.all }}"
+
+- name: Delete pre-existing GlusterFS config
+ file:
+ path: /var/lib/glusterd
+ state: absent
+ delegate_to: "{{ item }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+
+- name: Delete pre-existing additional GlusterFS config
+ file:
+ path: /etc/glusterfs
+ state: absent
+ delegate_to: "{{ item }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+
+- name: Delete pre-existing Heketi config
+ file:
+ path: /var/lib/heketi
+ state: absent
+ delegate_to: "{{ item }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+
+- name: Delete GlusterFS logs
+ file:
+ path: /var/log/glusterfs
+ state: absent
+ delegate_to: "{{ item }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+
+- name: Delete deploy resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,jobs,dc,secret"
+ selector: "deploy-heketi"
+ - kind: "svc"
+ name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+
+- name: Get GlusterFS storage devices state
+ command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
+ register: devices_info
+ delegate_to: "{{ item }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+ failed_when: False
+ when: glusterfs_wipe
+
+ # Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ delegate_to: "{{ item.item }}"
+ with_items: "{{ devices_info.results }}"
+ register: clear_devices
+ until:
+ - "'contains a filesystem in use' not in clear_devices.stderr"
+ delay: 1
+ retries: 30
+ when:
+ - glusterfs_wipe
+ - item.stdout_lines | count > 0
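
To make the wipe step above concrete: suppose pvdisplay on one node reported the two lines below (device and volume group names are invented for this example; the second PV belongs to no VG, so only the pvremove branch applies). The templated shell in "Clear GlusterFS storage device contents" would then expand to the single command shown after them.

  /dev/sdb vg_glusterfs_sdb
  /dev/sdc

  lvremove -ff vg_glusterfs_sdb; vgremove -fy vg_glusterfs_sdb; pvremove -fy /dev/sdb; pvremove -fy /dev/sdc;

The task retries this command up to 30 times, one second apart, as long as stderr still reports "contains a filesystem in use", which allows time for devices that are still busy to be released before the wipe succeeds.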