---
# Uninstall tasks for a GlusterFS + heketi deployment managed by the
# openshift_storage_glusterfs role.  Removes OpenShift API objects, on-host
# configuration/log directories, and (only when `glusterfs_wipe` is truthy)
# erases the LVM state on the backing storage devices.
#
# NOTE(review): several tasks reference `glusterfs_wipe`, `glusterfs_namespace`,
# `glusterfs_name`, `glusterfs_nodeselector` and `glusterfs_nodes` without a
# `default()` — assumes these are set in the role's defaults/vars; confirm
# before running this file standalone.

# Remove heketi API objects.  Deletion is best-effort: absent resources must
# not fail the play, hence `failed_when: false`.
- name: Delete pre-existing heketi resources
  oc_obj:
    namespace: "{{ glusterfs_namespace }}"
    kind: "{{ item.kind }}"
    name: "{{ item.name | default(omit) }}"
    selector: "{{ item.selector | default(omit) }}"
    state: absent
  with_items:
    - kind: "template,route,service,dc,jobs,secret"
      selector: "deploy-heketi"
    - kind: "svc"
      name: "heketi-storage-endpoints"
    - kind: "svc"
      name: "heketi-storage"
    - kind: "secret"
      name: "heketi-{{ glusterfs_name }}-topology-secret"
    - kind: "template,route,service,dc"
      name: "heketi-{{ glusterfs_name }}"
    - kind: "svc"
      name: "heketi-db-{{ glusterfs_name }}-endpoints"
    - kind: "sa"
      name: "heketi-{{ glusterfs_name }}-service-account"
    - kind: "secret"
      name: "heketi-{{ glusterfs_name }}-admin-secret"
    - kind: "secret"
      name: "heketi-{{ glusterfs_name }}-config-secret"
  failed_when: false

# Remove the GlusterFS daemonset, its template and the storageclass.
- name: Delete pre-existing GlusterFS resources
  oc_obj:
    namespace: "{{ glusterfs_namespace }}"
    kind: "{{ item.kind }}"
    name: "{{ item.name }}"
    state: absent
  with_items:
    - kind: template
      name: glusterfs
    - kind: daemonset
      name: "glusterfs-{{ glusterfs_name }}"
    - kind: storageclass
      name: "glusterfs-{{ glusterfs_name }}"

# Strip the GlusterFS node-selector labels from every node so the daemonset
# cannot be rescheduled onto them.
- name: Unlabel any existing GlusterFS nodes
  oc_label:
    name: "{{ hostvars[item].openshift.node.nodename }}"
    kind: node
    state: absent
    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
  with_items: "{{ groups.all }}"

# On-host cleanup: remove glusterd state, glusterfs config, heketi state and
# logs from each storage node.
- name: Delete pre-existing GlusterFS config
  file:
    path: /var/lib/glusterd
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete pre-existing additional GlusterFS config
  file:
    path: /etc/glusterfs
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete pre-existing Heketi config
  file:
    path: /var/lib/heketi
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete Glusterfs logs
  file:
    path: /var/log/glusterfs
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

# Remove any deploy-time resources that may have been recreated since the
# first deletion pass above.
- name: Delete deploy resources
  oc_obj:
    namespace: "{{ glusterfs_namespace }}"
    kind: "{{ item.kind }}"
    name: "{{ item.name | default(omit) }}"
    selector: "{{ item.selector | default(omit) }}"
    state: absent
  with_items:
    - kind: "template,route,service,jobs,dc,secret"
      selector: "deploy-heketi"
    - kind: "svc"
      name: "heketi-storage-endpoints"
    - kind: "secret"
      name: "heketi-{{ glusterfs_name }}-topology-secret"

# Only inspect (and later wipe) the backing devices when the operator has
# explicitly opted in via `glusterfs_wipe`.
- name: Get GlusterFS storage devices state
  command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
  register: devices_info
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"
  # Devices that are not PVs make pvdisplay exit non-zero; that is expected.
  failed_when: false
  when: glusterfs_wipe

# Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every
# device found to be a physical volume.  Retries while volumes are still
# mounted, giving pods up to ~30s to release them.
- name: Clear GlusterFS storage device contents
  shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
  delegate_to: "{{ item.item }}"
  with_items: "{{ devices_info.results }}"
  register: clear_devices
  until:
    - "'contains a filesystem in use' not in clear_devices.stderr"
  delay: 1
  retries: 30
  when:
    - glusterfs_wipe
    - item.stdout_lines | count > 0