From 6bc3a3ac71e11fb6459df715536fec373c123a97 Mon Sep 17 00:00:00 2001 From: "Suren A. Chilingaryan" Date: Wed, 7 Mar 2018 07:03:57 +0100 Subject: Streamlined networking, OpenShift recovery, Ganesha --- anslib/openshift-ansible | 2 +- group_vars/OSEv3.yml | 32 +- group_vars/ands.yml | 9 +- group_vars/staging.yml | 24 +- group_vars/testing.yml | 6 +- group_vars/vagrant.yml | 4 + library/warn.py | 14 + opts.sh | 25 +- playbooks/ands-gluster-ganesha.yml | 16 + playbooks/ands-gluster-migrate.yml | 20 + playbooks/ands-prepare.yml | 20 +- playbooks/ands-vm-conf.yml | 5 - playbooks/maintain.yml | 10 +- playbooks/openshift-add-etcd.yml | 13 + playbooks/openshift-add-gluster.yml | 18 + playbooks/openshift-add-masters.yml | 22 +- playbooks/openshift-add-nodes.yml | 19 +- playbooks/openshift-install.yml | 23 +- playbooks/openshift-setup-project.yml | 2 +- playbooks/openshift-setup-projects.yml | 16 +- playbooks/openshift-setup-security.yml | 16 +- playbooks/openshift-setup-storage.yml | 16 +- playbooks/openshift-setup-users.yml | 16 +- playbooks/openshift-setup-vpn.yml | 2 +- playbooks/openshift-setup.yml | 18 +- playbooks/reports | 1 + reports/certs.html | 599 +++++++++++++++++++++ roles/ands_facts/defaults/main.yml | 20 + roles/ands_facts/tasks/find_interface_by_ip.yml | 20 + roles/ands_facts/tasks/main.yml | 65 +-- roles/ands_facts/tasks/network.yml | 49 ++ roles/ands_facts/tasks/storage.yml | 59 ++ roles/ands_network/README | 8 + roles/ands_network/tasks/common.yml | 49 ++ roles/ands_network/tasks/install_post.yml | 9 + roles/ands_network/tasks/install_pre.yml | 15 + roles/ands_network/tasks/main.yml | 3 + roles/ands_network/tasks/maintain.yml | 9 + roles/ands_openshift/defaults/main.yml | 2 +- roles/ands_openshift/tasks/hostnames.yml | 15 - roles/ands_storage/tasks/detect_device.yml | 5 + roles/ands_storage/tasks/main.yml | 8 +- roles/ands_vagrant_vm/templates/Vagrantfile.j2 | 7 +- roles/ands_vagrant_vmconf/tasks/main.yml | 4 +- roles/common/default/main.yml | 1 + roles/common/tasks/main.yml | 7 + roles/docker/tasks/main.yml | 18 +- roles/docker/tasks/storage.yml | 2 +- roles/ganesha/files/ganesha.conf | 1 + roles/ganesha/tasks/main.yml | 30 ++ roles/ganesha/templates/ganesha-ha.conf.j2 | 12 + roles/glusterfs/defaults/main.yml | 2 +- roles/glusterfs/tasks/common.yml | 1 + roles/glusterfs/tasks/create_domain.yml | 8 + roles/glusterfs/tasks/ganesha.yml | 6 + roles/glusterfs/tasks/install.yml | 34 ++ roles/glusterfs/tasks/iterate_domains.yml | 7 + roles/glusterfs/tasks/iterate_volumes.yml | 12 + roles/glusterfs/tasks/main.yml | 35 +- roles/glusterfs/tasks/migrate.yml | 34 ++ roles/glusterfs/tasks/migrate_failed_brick.yml | 10 + roles/glusterfs/tasks/migrate_inform.yml | 1 + roles/glusterfs/tasks/migrate_volume.yml | 17 + roles/glusterfs/tasks/volumes.yml | 2 +- roles/glusterfs/templates/export.openshift.conf.j2 | 44 ++ roles/keepalived/defaults/main.yml | 7 +- roles/keepalived/tasks/main.yml | 1 + roles/keepalived/templates/keepalived.conf.j2 | 15 +- scripts/gluster.sh | 85 +++ scripts/opts.sh | 9 + setup.sh | 100 ++-- setup/configs/labels.yml | 9 +- setup/configs/volumes.yml | 16 +- 73 files changed, 1511 insertions(+), 330 deletions(-) create mode 100644 group_vars/vagrant.yml create mode 100644 library/warn.py create mode 100644 playbooks/ands-gluster-ganesha.yml create mode 100644 playbooks/ands-gluster-migrate.yml delete mode 100644 playbooks/ands-vm-conf.yml create mode 100644 playbooks/openshift-add-etcd.yml create mode 100644 playbooks/openshift-add-gluster.yml create mode 120000 
playbooks/reports create mode 100644 reports/certs.html create mode 100644 roles/ands_facts/tasks/find_interface_by_ip.yml create mode 100644 roles/ands_facts/tasks/network.yml create mode 100644 roles/ands_facts/tasks/storage.yml create mode 100644 roles/ands_network/README create mode 100644 roles/ands_network/tasks/common.yml create mode 100644 roles/ands_network/tasks/install_post.yml create mode 100644 roles/ands_network/tasks/install_pre.yml create mode 100644 roles/ands_network/tasks/main.yml create mode 100644 roles/ands_network/tasks/maintain.yml delete mode 100644 roles/ands_openshift/tasks/hostnames.yml create mode 100644 roles/common/default/main.yml create mode 100644 roles/ganesha/files/ganesha.conf create mode 100644 roles/ganesha/tasks/main.yml create mode 100644 roles/ganesha/templates/ganesha-ha.conf.j2 create mode 100644 roles/glusterfs/tasks/ganesha.yml create mode 100644 roles/glusterfs/tasks/install.yml create mode 100644 roles/glusterfs/tasks/iterate_domains.yml create mode 100644 roles/glusterfs/tasks/iterate_volumes.yml create mode 100644 roles/glusterfs/tasks/migrate.yml create mode 100644 roles/glusterfs/tasks/migrate_failed_brick.yml create mode 100644 roles/glusterfs/tasks/migrate_inform.yml create mode 100644 roles/glusterfs/tasks/migrate_volume.yml create mode 100644 roles/glusterfs/templates/export.openshift.conf.j2 create mode 100755 scripts/gluster.sh create mode 100644 scripts/opts.sh diff --git a/anslib/openshift-ansible b/anslib/openshift-ansible index a462739..d1fcbd7 160000 --- a/anslib/openshift-ansible +++ b/anslib/openshift-ansible @@ -1 +1 @@ -Subproject commit a46273949c39d6bd20ee65eb89aa96a73ffe9aef +Subproject commit d1fcbd7a9a8511b895f9a163f7fa2a7bc0d72f2b diff --git a/group_vars/OSEv3.yml b/group_vars/OSEv3.yml index 537e5d7..d896677 100644 --- a/group_vars/OSEv3.yml +++ b/group_vars/OSEv3.yml @@ -1,5 +1,6 @@ ### Deployment Type -deployment_type: "origin" +openshift_deployment_type: origin +openshift_master_cluster_method: "native" #openshift_release: "v1.5" openshift_release: "v3.7.1" #openshift_release: "v3.7" @@ -11,7 +12,6 @@ openshift_release: "v3.7.1" #containerized: true containerized: false -openshift_master_cluster_method: "native" os_firewall_use_firewalld: true #Recommended to avoid: No package matching 'origin-docker-excluder-1.5.0*' found available @@ -20,23 +20,24 @@ os_firewall_use_firewalld: true ### Network & DNS configuration -openshift_master_cluster_hostname: "{{ ands_openshift_lb }}" +openshift_master_cluster_hostname: "{{ ands_openshift_cluster_fqdn }}" openshift_master_cluster_public_hostname: "{{ ands_openshift_lb }}" -openshift_master_default_subdomain: "{{ ands_openshift_subdomain }}" +openshift_master_default_subdomain: "{{ ands_openshift_subdomain | default(ands_openshift_lb) }}" openshift_master_ingress_ip_network_cidr: "{{ ands_openshift_ingress_network }}" #openshift_portal_net: #osm_host_subnet_length: -openshift_ip: "{{ ands_openshift_network | ipaddr(ands_host_id) | ipaddr('address') }}" -openshift_public_ip: "{{ ands_openshift_public_network | ipaddr(ands_host_id) | ipaddr('address') }}" -openshift_hostname: "{{ ansible_hostname }}" -openshift_public_hostname: "{{ ansible_hostname }}.{{ ands_cluster_domain }}" -#openshift_hostname: "{{ ands_openshift_network | ipaddr(ands_host_id) | ipaddr('address') }}" -#openshift_public_hostname: "{{ ands_openshift_public_network | ipaddr(ands_host_id) | ipaddr('address') }}" +# we may need to put conditionals here (except _ip). 
Currently values are set to '' if undefined (OpenShift uses None, which is equivalent in Ansible)
+openshift_ip: "{{ ands_openshift_ip }}"
+openshift_public_ip: "{{ ands_openshift_public_ip }}"
+openshift_hostname: "{{ ands_openshift_fqdn }}"
+openshift_public_hostname: "{{ ands_openshift_public_fqdn }}"
+
 #Check configuration to fight dynamic IPs
-openshift_dns_ip: "{{ ands_ipfailover_vips[0] | ipaddr('address') }}"
-openshift_set_hostname: true
+# We have per node DNS, so it is not necessary to use vips here.
+# This overrides the default in roles/openshift_node/defaults, which sets dns_ip to: ansible_default_ipv4['address']
+openshift_dns_ip: "{{ openshift_ip }}"
 openshift_set_node_ip: true

 ### Node configuration
@@ -60,6 +61,13 @@ openshift_node_cert_expire_days: 3650
 openshift_master_cert_expire_days: 3650
 etcd_ca_default_days: 3650

+### Docker
+# log_driver is currently ignored for some reason
+openshift_docker_log_driver: "json-file"
+openshift_docker_log_options: [ max-size=2m, max-file=3 ]
+openshift_docker_options: --log-driver json-file
+#openshift_docker_options: --log-opt max-size=2m --log-opt max-file=3
+
 ### Dynamic Storage
 openshift_storage_glusterfs_image: chsa/gluster-centos
 openshift_storage_glusterfs_version: "{{ glusterfs_version }}"

diff --git a/group_vars/ands.yml b/group_vars/ands.yml
index d9639f3..d81f11e 100644
--- a/group_vars/ands.yml
+++ b/group_vars/ands.yml
@@ -1,10 +1,11 @@
-# This should be here, the variables from the role are not propogated to hostvars
+ands_configure_heketi: false
+# This should be here, the variables from the role are not propagated to hostvars
 #ands_master_id: "{{ ('masters' in group_names) | ternary(groups.masters.index(('masters' in group_names) | ternary(inventory_hostname, groups.masters[0])), -1) }}"
 ands_storage_hostname: "{{ ands_storage_network | default(false) | ternary(ands_storage_network | default('') | ipaddr(ands_host_id) | ipaddr('address'), ansible_fqdn) }}"
-ands_configure_heketi: false
+
 ands_repo_url: http://ufo.kit.edu/ands/repos
 ands_repositories:
-  - name: ands-updates
-    url: "{{ ands_repo_url }}/centos74/"
+  - name: ands-updates
+    url: "{{ ands_repo_url }}/centos74/"

diff --git a/group_vars/staging.yml b/group_vars/staging.yml
index b35440a..34bf7c7 100644
--- a/group_vars/staging.yml
+++ b/group_vars/staging.yml
@@ -1,6 +1,7 @@
 ands_storage_network: 192.168.212.0/24

-ands_cluster_domain: ipe.kit.edu
+#ands_cluster_domain: ipe.kit.edu
+ands_hostname_template: ipeshift
 #ands_openshift_lb: openshift.ipe.kit.edu
 #ands_openshift_subdomain: openshift.ipe.kit.edu
 ands_openshift_lb: openshift.suren.me
@@ -9,17 +10,22 @@ ands_openshift_network: 192.168.213.0/24
 ands_openshift_public_network: 192.168.226.0/24
 ands_openshift_ingress_network: 192.168.216.0/24

-ands_ipfailover_interface: eth1
-ands_ipfailover_vips: [141.52.64.28/24]
+ands_inner_domain: ""
+#ands_inner_lb: true
+#ands_openshift_set_hostname: false
+
+ands_inner_lb: false
+ands_openshift_set_hostname: true
+
+
+#ands_ipfailover_interface: eth1
+ands_ipfailover_vips: [141.52.64.28/23]

 katrin_openvpn_subnet_bits: 24
 katrin_openvpn_subnet_offset: 221
 katrin_openvpn_network: "192.168.0.0/16"

-vagrant_hostname_template: ipeshift
-vagrant_cpu_cores: 4
-vagrant_mem_size: 16
-vagrant_disk_size: 240
-
-#ands_provision_without_dns: true
 ands_prefer_docker: true
+
+
+

diff --git a/group_vars/testing.yml b/group_vars/testing.yml
index f0e4770..72b2dba 100644
--- a/group_vars/testing.yml
+++ b/group_vars/testing.yml
@@ -9,14 +9,14 @@ ands_openshift_network: 192.168.13.0/24
 ands_openshift_public_network: 192.168.26.0/24
 ands_openshift_ingress_network: 192.168.16.0/24

+ands_hostname_template: ipekatrin
+
 ands_ipfailover_interface: eth1
-ands_ipfailover_vips: [141.52.64.15/24, 141.52.64.17/24]
+ands_ipfailover_vips: [141.52.64.15/23, 141.52.64.17/23]

 katrin_openvpn_subnet_bits: 24
 katrin_openvpn_subnet_offset: 111
 katrin_openvpn_network: "192.168.0.0/16"

-vagrant_hostname_template: ipekatrin
-#ands_provision_without_dns: true
 ands_prefer_docker: true

diff --git a/group_vars/vagrant.yml b/group_vars/vagrant.yml
new file mode 100644
index 0000000..49921a5
--- /dev/null
+++ b/group_vars/vagrant.yml
@@ -0,0 +1,4 @@
+vagrant_hostname_template: "{{ ands_hostname_template }}"
+vagrant_cpu_cores: 4
+vagrant_mem_size: 16
+vagrant_disk_size: 240

diff --git a/library/warn.py b/library/warn.py
new file mode 100644
index 0000000..de6421c
--- /dev/null
+++ b/library/warn.py
@@ -0,0 +1,14 @@
+
+from ansible.module_utils.basic import *
+
+def main():
+    spec = {
+        'msg' : { 'required': True, 'type': 'str' }
+    }
+
+    module = AnsibleModule(argument_spec=spec)
+    module.exit_json(changed=False, warnings=[ module.params['msg'] ])
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file

diff --git a/opts.sh b/opts.sh
index 7e6d16f..ac1962a 100644
--- a/opts.sh
+++ b/opts.sh
@@ -1,7 +1,7 @@
 #!/bin/bash

 inventory="inventories/testing.erb"
-group="nodes"
+group="ands"

 usage() {
     cat << END
@@ -23,14 +23,23 @@ Actions:
     configure         - configures OpenShift cluster (Storage, Users, OpenVPN tunnels)
     projects          - installs configuration files and OpenShift resources for KaaS and other configured projects

+   Additional services
+    ganesha           - provides external NFS access to gluster volumes
+
+   Maintenance
+    maintain          - check and fix running configuration
+    upgrade           - upgrade to a new version (Dangerous)
+    migrate           - migrate GlusterFS bricks to a new server

    Scaling the cluster
+    masters           - complete action: prepares the masters/nodes, scales up the cluster, and reconfigures storage (if necessary)
     nodes             - complete action: prepares the nodes, scales up the cluster, and reconfigures storage (if necessary)
-    masters           - complete action: prepares the masters, scales up the cluster, and reconfigures storage
     prepare           - prepares the new nodes
+    openshift-masters - scales OpenShift cluster with additional masters & nodes
     openshift-nodes   - scales OpenShift cluster with additional nodes
-    openshift-masters - scales OpenShift cluster (master scallability is not checked)
+    openshift-etcd    - scales etcd cluster (if for some reason it was not performed during master/nodes scale-up)
+    openshift-gluster - scales gluster pods and adapts heketi topology (if not performed during master/nodes scale-up)
     configure         - Configures new nodes (Storage, Users, OpenVPN tunnels)
-    upgrade           - Upgrade to a new version (Dangerous)

    Configuration of new resources, etc.
     users             - configure user roles & passwords
@@ -41,11 +50,13 @@ Actions:
     certs             - re-generate OpenShift x509 certificates
     check             - check current setup and report if any maintenance should be performed
     setup             - executes specific configuration task from ands-openshift
-                        Tasks: hostnames, users, ssh, storage, heketi
-
+                        Tasks: users, ssh, storage, heketi

    Custom actions
     playbook.yml      - execute the specified playbook (after ands_facts)
     role              - generates temporary playbook and executes the role
+
+   Ansible option
+    --limit           - only run on the specified hosts
END
     echo
@@ -101,7 +112,7 @@ END
     fi

#    ansible-playbook -vvv --vault-password-file .vault-pass -i $inventory $playbook $@
-    ansible-playbook --vault-password-file .vault-pass -i $inventory $playbook $@
+    ansible-playbook --vault-password-file .vault-pass -i $inventory $playbook "$@"

     if [ -n "$clean" ]; then
        rm -rf "$clean"

diff --git a/playbooks/ands-gluster-ganesha.yml b/playbooks/ands-gluster-ganesha.yml
new file mode 100644
index 0000000..586dd07
--- /dev/null
+++ b/playbooks/ands-gluster-ganesha.yml
@@ -0,0 +1,16 @@
+- name: Common setup procedures
+  hosts: ands
+  roles:
+    - role: ands_facts
+
+
+- name: Configure GlusterFS cluster
+  hosts: masters, new_masters
+  roles:
+    - { role: glusterfs, action: ganesha }
+    - { role: ganesha }
+  vars:
+    glusterfs_network: "{{ ands_storage_network }}"
+    glusterfs_servers: "{{ ands_storage_servers }}"
+    glusterfs_bricks_path: "{{ ands_data_path }}/glusterfs"
+    glusterfs_domains: "{{ ands_storage_domains }}"

diff --git a/playbooks/ands-gluster-migrate.yml b/playbooks/ands-gluster-migrate.yml
new file mode 100644
index 0000000..9ef3f63
--- /dev/null
+++ b/playbooks/ands-gluster-migrate.yml
@@ -0,0 +1,20 @@
+- name: Common setup procedures
+  hosts: ands
+  roles:
+    - role: ands_facts
+
+
+- name: Configure GlusterFS cluster
+  hosts: ands_servers
+  run_once: true
+  roles:
+    - { role: glusterfs, action: migrate }
+  vars:
+    glusterfs_network: "{{ ands_storage_network }}"
+    glusterfs_servers: "{{ ands_storage_servers }}"
+    glusterfs_bricks_path: "{{ ands_data_path }}/glusterfs"
+    glusterfs_domains: "{{ ands_storage_domains }}"
+    glusterfs_migrate_from: "{{ gfs_from }}"
+    glusterfs_migrate_to: "{{ gfs_to }}"
+
+

diff --git a/playbooks/ands-prepare.yml b/playbooks/ands-prepare.yml
index e8c785b..d198ec0 100644
--- a/playbooks/ands-prepare.yml
+++ b/playbooks/ands-prepare.yml
@@ -1,22 +1,28 @@
-- name: Common setup procedures
+- name: Determine Ands facts
   hosts: ands
   roles:
     - role: ands_facts

+- name: Prepare virtual hosts
+  hosts: virtual
+  roles:
+    - role: ands_vagrant_vmconf
+
+- name: Common setup procedures
+  hosts: ands
+  roles:
     - role: common
     - role: firewall
+    - { role: ands_network, action: common }

 - name: Keepalived service
-  hosts: masters, new_masters
+  hosts: masters
   roles:
     - role: keepalived
-  vars:
-    keepalived_vips: "{{ ands_ipfailover_vips | default([]) }}"
-    keepalived_iface: "{{ ands_ipfailover_interface | default('eth0') }}"
-    keepalived_password: "{{ ands_secrets.keepalived }}"

 #OpenVPN started before Origin-node causes problems
 #- name: OpenVPN service
-#  hosts: nodes
+#  hosts: nodes, new_nodes
 #  roles:
 #    - role: openvpn
 #      vars:

diff --git a/playbooks/ands-vm-conf.yml b/playbooks/ands-vm-conf.yml
deleted file mode 100644
index 2b1020b..0000000
--- a/playbooks/ands-vm-conf.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-- name: Common setup procedures
-  hosts: ands_servers
-  roles:
-    - role: ands_vagrant_vmconf
-
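For reference, the new migration playbook above expects the source and destination servers as extra variables (they feed glusterfs_migrate_from/glusterfs_migrate_to). A direct invocation might look like this sketch, reusing the vault and inventory conventions from opts.sh; the host names are hypothetical:

    # Move the GlusterFS bricks of the configured domains off a failed server
    ansible-playbook --vault-password-file .vault-pass -i inventories/testing.erb \
        playbooks/ands-gluster-migrate.yml -e gfs_from=ipekatrin2 -e gfs_to=ipekatrin3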
diff --git a/playbooks/maintain.yml b/playbooks/maintain.yml
index 3167252..03d6d9a 100644
--- a/playbooks/maintain.yml
+++ b/playbooks/maintain.yml
@@ -1,11 +1,17 @@
+- name: Common setup procedures
+  hosts: ands
+  roles:
+    - role: ands_facts
+    - { role: ands_network, action: maintain }
+
 - name: Check cert expirys
   hosts: nodes:masters:etcd
   become: yes
   gather_facts: no
   vars:
-    openshift_certificate_expiry_save_json_results: yes
+    openshift_certificate_expiry_save_json_results: no
     openshift_certificate_expiry_generate_html_report: yes
-    openshift_certificate_expiry_html_report_path: ./report
+    openshift_certificate_expiry_html_report_path: reports/certs.html
     openshift_certificate_expiry_show_all: yes
   roles:
     - role: openshift_certificate_expiry

diff --git a/playbooks/openshift-add-etcd.yml b/playbooks/openshift-add-etcd.yml
new file mode 100644
index 0000000..c866f49
--- /dev/null
+++ b/playbooks/openshift-add-etcd.yml
@@ -0,0 +1,13 @@
+- name: Configure cluster hosts names
+  hosts: nodes:new_nodes
+  roles:
+    - { role: ands_facts }
+    - { role: ands_network, action: install_pre }
+
+- import_playbook: ../anslib/openshift-ansible/playbooks/prerequisites.yml
+- import_playbook: ../anslib/openshift-ansible/playbooks/openshift-etcd/scaleup.yml
+
+- name: Configure cluster hosts names
+  hosts: nodes:new_nodes
+  roles:
+    - { role: ands_network, action: install_post }

diff --git a/playbooks/openshift-add-gluster.yml b/playbooks/openshift-add-gluster.yml
new file mode 100644
index 0000000..9495b06
--- /dev/null
+++ b/playbooks/openshift-add-gluster.yml
@@ -0,0 +1,18 @@
+- name: Configure cluster hosts names
+  hosts: nodes:new_nodes
+  roles:
+    - { role: ands_facts }
+    - { role: ands_network, action: install_pre }
+
+- import_playbook: ../anslib/openshift-ansible/playbooks/prerequisites.yml
+
+- import_playbook: ../anslib/openshift-ansible/playbooks/openshift-glusterfs/config.yml
+  vars:
+    openshift_storage_glusterfs_is_missing: False
+    openshift_storage_glusterfs_heketi_is_missing: False
+
+
+- name: Configure cluster hosts names
+  hosts: nodes:new_nodes
+  roles:
+    - { role: ands_network, action: install_post }

diff --git a/playbooks/openshift-add-masters.yml b/playbooks/openshift-add-masters.yml
index bcc1a41..99672d0 100644
--- a/playbooks/openshift-add-masters.yml
+++ b/playbooks/openshift-add-masters.yml
@@ -1,7 +1,25 @@
 - name: Configure cluster hosts names
-  hosts: nodes
+  hosts: nodes:new_nodes
   roles:
     - { role: ands_facts }
-    - { role: ands_openshift, subrole: hostnames }
+    - { role: common, os_update: true }
+    - { role: ands_network, action: install_pre }

+# etcd will be provisioned as well if the node is listed in new_etcd
+- import_playbook: ../anslib/openshift-ansible/playbooks/prerequisites.yml
 - import_playbook: ../anslib/openshift-ansible/playbooks/openshift-master/scaleup.yml
+
+- import_playbook: ../anslib/openshift-ansible/playbooks/openshift-glusterfs/config.yml
+  vars:
+    openshift_storage_glusterfs_is_missing: False
+    openshift_storage_glusterfs_heketi_is_missing: False
+
+- name: Keepalived service
+  hosts: new_masters
+  roles:
+    - role: keepalived
+
+- name: Configure cluster hosts names
+  hosts: nodes:new_nodes
+  roles:
+    - { role: ands_network, action: install_post }

diff --git a/playbooks/openshift-add-nodes.yml b/playbooks/openshift-add-nodes.yml
index 04a56a8..c788e12 100644
--- a/playbooks/openshift-add-nodes.yml
+++ b/playbooks/openshift-add-nodes.yml
@@ -1,7 +1,22 @@
 - name: Configure cluster hosts names
-  hosts: nodes
+  hosts: nodes:new_nodes
   roles:
     - { role: ands_facts }
+    - { role: common, os_update: true }
+    - { role:
ands_network, action: install_pre } +# I am not sure if etcd will be automatic here. If not, we may need to run etcd scaleup afterwards +# if node is also in new_etcd list +- import_playbook: ../anslib/openshift-ansible/playbooks/prerequisites.yml - import_playbook: ../anslib/openshift-ansible/playbooks/openshift-node/scaleup.yml +#- import_playbook: ../anslib/openshift-ansible/playbooks/openshift-etcd/scaleup.yml + +- import_playbook: ../anslib/openshift-ansible/playbooks/openshift-glusterfs/config.yml + vars: + openshift_storage_glusterfs_is_missing: False + openshift_storage_glusterfs_heketi_is_missing: False + +- name: Configure cluster hosts names + hosts: nodes:new_nodes + roles: + - { role: ands_network, action: install_post } diff --git a/playbooks/openshift-install.yml b/playbooks/openshift-install.yml index f3a81ea..8d62b1c 100644 --- a/playbooks/openshift-install.yml +++ b/playbooks/openshift-install.yml @@ -2,25 +2,12 @@ hosts: nodes roles: - { role: ands_facts } - - { role: ands_openshift, subrole: hostnames } - -- name: Temporary provision /etc/hosts with Masters IP. - hosts: nodes:!masters - tasks: - - lineinfile: dest="/etc/hosts" line="{{ ands_openshift_network | ipaddr(node_id) | ipaddr('address') }} {{ ands_openshift_lb }}" regexp=".*{{ ands_openshift_lb }}$" state="present" - when: (ands_provision_without_dns | default(false)) - vars: - node_id: "{{ hostvars[groups['masters'][0]]['ands_host_id'] }}" + - { role: ands_network, action: install_pre } - import_playbook: ../anslib/openshift-ansible/playbooks/prerequisites.yml - -#- include: ../anslib/openshift-ansible/playbooks/byo/config.yml -#- include: ../anslib/openshift-ansible/playbooks/deploy_cluster.yml - import_playbook: ../anslib/openshift-ansible/playbooks/deploy_cluster.yml -#- import_playbook: openshift-deploy-cluster.yml -- name: Remove temporary entries in /etc/hosts - hosts: nodes:!masters - tasks: - - lineinfile: dest="/etc/hosts" regexp=".*{{ ands_openshift_lb }}$" state="absent" - when: (ands_provision_without_dns | default(false)) +- name: Configure cluster hosts names + hosts: nodes + roles: + - { role: ands_network, action: install_post } diff --git a/playbooks/openshift-setup-project.yml b/playbooks/openshift-setup-project.yml index ab95933..6150cdf 100644 --- a/playbooks/openshift-setup-project.yml +++ b/playbooks/openshift-setup-project.yml @@ -1,4 +1,4 @@ -- name: Configure users & user projects +- name: Analyze Ands configuration hosts: masters roles: - { role: ands_facts } diff --git a/playbooks/openshift-setup-projects.yml b/playbooks/openshift-setup-projects.yml index cc36498..689ecb4 100644 --- a/playbooks/openshift-setup-projects.yml +++ b/playbooks/openshift-setup-projects.yml @@ -1,16 +1,8 @@ -- name: Configure users & user projects +- name: Analyze Ands configuration hosts: masters roles: - { role: ands_facts } -- name: Temporary provision /etc/hosts with Masters IP. 
- hosts: nodes:!masters - tasks: - - lineinfile: dest="/etc/hosts" line="{{ ands_openshift_network | ipaddr(node_id) | ipaddr('address') }} {{ ands_openshift_lb }}" regexp=".*{{ ands_openshift_lb }}$" state="present" - when: (ands_provision_without_dns | default(false)) - vars: - node_id: "{{ hostvars[groups['masters'][0]]['ands_host_id'] }}" - - name: Configure users & user projects hosts: masters roles: @@ -22,9 +14,3 @@ kaas_projects: "{{ ands_openshift_projects.keys() }}" kaas_openshift_volumes: "{{ ands_openshift_volumes }}" - -- name: Remove temporary entries in /etc/hosts - hosts: nodes:!masters - tasks: - - lineinfile: dest="/etc/hosts" regexp=".*{{ ands_openshift_lb }}$" state="absent" - when: (ands_provision_without_dns | default(false)) diff --git a/playbooks/openshift-setup-security.yml b/playbooks/openshift-setup-security.yml index 6c85602..f576ba5 100644 --- a/playbooks/openshift-setup-security.yml +++ b/playbooks/openshift-setup-security.yml @@ -1,24 +1,10 @@ -- name: Configure users +- name: Analyze Ands configuration hosts: masters roles: - { role: ands_facts } - -- name: Temporary provision /etc/hosts with Masters IP. - hosts: nodes:!masters - tasks: - - lineinfile: dest="/etc/hosts" line="{{ ands_openshift_network | ipaddr(node_id) | ipaddr('address') }} {{ ands_openshift_lb }}" regexp=".*{{ ands_openshift_lb }}$" state="present" - when: (ands_provision_without_dns | default(false)) - vars: - node_id: "{{ hostvars[groups['masters'][0]]['ands_host_id'] }}" - - name: Configure security hosts: masters roles: - { role: ands_openshift, subrole: security } -- name: Remove temporary entries in /etc/hosts - hosts: nodes:!masters - tasks: - - lineinfile: dest="/etc/hosts" regexp=".*{{ ands_openshift_lb }}$" state="absent" - when: (ands_provision_without_dns | default(false)) diff --git a/playbooks/openshift-setup-storage.yml b/playbooks/openshift-setup-storage.yml index 387b775..64099bc 100644 --- a/playbooks/openshift-setup-storage.yml +++ b/playbooks/openshift-setup-storage.yml @@ -1,5 +1,5 @@ --- -- name: Configure GlusterFS storage +- name: Analyze Ands configuration hosts: nodes roles: - { role: ands_facts } @@ -15,23 +15,9 @@ glusterfs_domains: "{{ ands_storage_domains }}" -- name: Temporary provision /etc/hosts with Masters IP. - hosts: nodes:!masters - tasks: - - lineinfile: dest="/etc/hosts" line="{{ ands_openshift_network | ipaddr(node_id) | ipaddr('address') }} {{ ands_openshift_lb }}" regexp=".*{{ ands_openshift_lb }}$" state="present" - when: (ands_provision_without_dns | default(false)) - vars: - node_id: "{{ hostvars[groups['masters'][0]]['ands_host_id'] }}" - - name: Configure OpenShift volumes hosts: masters roles: - { role: ands_openshift, subrole: storage } -- name: Remove temporary entries in /etc/hosts - hosts: nodes:!masters - tasks: - - lineinfile: dest="/etc/hosts" regexp=".*{{ ands_openshift_lb }}$" state="absent" - when: (ands_provision_without_dns | default(false)) - # we also reconfigure kaas to populate new configs? diff --git a/playbooks/openshift-setup-users.yml b/playbooks/openshift-setup-users.yml index d160823..f54a806 100644 --- a/playbooks/openshift-setup-users.yml +++ b/playbooks/openshift-setup-users.yml @@ -1,24 +1,10 @@ -- name: Configure users +- name: Analyze Ands configuration hosts: masters roles: - { role: ands_facts } - -- name: Temporary provision /etc/hosts with Masters IP. 
-  hosts: nodes:!masters
-  tasks:
-  - lineinfile: dest="/etc/hosts" line="{{ ands_openshift_network | ipaddr(node_id) | ipaddr('address') }} {{ ands_openshift_lb }}" regexp=".*{{ ands_openshift_lb }}$" state="present"
-    when: (ands_provision_without_dns | default(false))
-  vars:
-    node_id: "{{ hostvars[groups['masters'][0]]['ands_host_id'] }}"
-
 - name: Configure users
   hosts: masters
   roles:
     - { role: ands_openshift, subrole: users }

-- name: Remove temporary entries in /etc/hosts
-  hosts: nodes:!masters
-  tasks:
-  - lineinfile: dest="/etc/hosts" regexp=".*{{ ands_openshift_lb }}$" state="absent"
-    when: (ands_provision_without_dns | default(false))

diff --git a/playbooks/openshift-setup-vpn.yml b/playbooks/openshift-setup-vpn.yml
index be5fe45..c6db977 100644
--- a/playbooks/openshift-setup-vpn.yml
+++ b/playbooks/openshift-setup-vpn.yml
@@ -1,4 +1,4 @@
-- name: Common setup procedures
+- name: Analyze Ands configuration
   hosts: ands
   roles:
     - role: ands_facts

diff --git a/playbooks/openshift-setup.yml b/playbooks/openshift-setup.yml
index 4b98f39..d5675e4 100644
--- a/playbooks/openshift-setup.yml
+++ b/playbooks/openshift-setup.yml
@@ -1,29 +1,13 @@
-- name: Common setup procedures
+- name: Analyze Ands configuration
   hosts: ands
   roles:
     - role: ands_facts
-
-- name: Temporary provision /etc/hosts with Masters IP.
-  hosts: nodes:!masters
-  tasks:
-  - lineinfile: dest="/etc/hosts" line="{{ ands_openshift_network | ipaddr(node_id) | ipaddr('address') }} {{ ands_openshift_lb }}" regexp=".*{{ ands_openshift_lb }}$" state="present"
-    when: (ands_provision_without_dns | default(false))
-  vars:
-    node_id: "{{ hostvars[groups['masters'][0]]['ands_host_id'] }}"

 - name: Various OpenShift resources
   hosts: nodes
   roles:
     - role: ands_openshift

-- name: Remove temporary entries in /etc/hosts
-  hosts: nodes:!masters
-  tasks:
-  - lineinfile: dest="/etc/hosts" regexp=".*{{ ands_openshift_lb }}$" state="absent"
-    when: (ands_provision_without_dns | default(false))
-
-
 - name: OpenVPN service
   hosts: nodes
   roles:

diff --git a/playbooks/reports b/playbooks/reports
new file mode 120000
index 0000000..d5063a5
--- /dev/null
+++ b/playbooks/reports
@@ -0,0 +1 @@
+../reports/
\ No newline at end of file

diff --git a/reports/certs.html b/reports/certs.html
new file mode 100644
index 0000000..5f3b81f
--- /dev/null
+++ b/reports/certs.html
@@ -0,0 +1,599 @@
[ 599 lines of generated HTML ("OCP Certificate Expiry Report") omitted. The report renders, per host, certificate tables for the ocp_certs, etcd, kubeconfigs, router, and registry groups, listing common/alt names, serial, health, days remaining, expiration date, and path. Captured run: checked 2018-03-06 with a 30-day warning window (warn-after date 2018-04-05). Results: 192.168.226.1 — 11 certificates, 0 expired / 0 warning / 11 OK; 192.168.226.4 — 11 certificates, all OK; 192.168.226.3 — 5 certificates, all OK. Node, master, and etcd certificates expire 2028-02-25/28; the wildcard router certificate *.openshift.suren.me expires 2020-02-27 (723 days remaining). ]
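The report is produced by the openshift_certificate_expiry role as configured in playbooks/maintain.yml above; the playbooks/reports symlink added by this commit makes the role write into reports/. Regenerating it is a plain playbook run, sketched here with the default inventory from opts.sh:

    # Re-check all cluster certificates and rebuild reports/certs.html
    ansible-playbook --vault-password-file .vault-pass -i inventories/testing.erb \
        playbooks/maintain.yml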
diff --git a/roles/ands_facts/defaults/main.yml b/roles/ands_facts/defaults/main.yml
index ac61876..fc3fcfd 100644
--- a/roles/ands_facts/defaults/main.yml
+++ b/roles/ands_facts/defaults/main.yml
@@ -1,3 +1,5 @@
+ands_none: "{{ None }}"
+
 ands_configure_heketi: false

 ands_data_device_default_threshold: 10
@@ -9,3 +11,21 @@ ands_data_lv: "ands_data"
 ands_data_vg: "{{ ( ansible_lvm['lvs'][ands_data_lv] | default(ands_empty_lv) )['vg'] }}"
 ands_heketi_lv: "ands_heketi"
 ands_heketi_vg: "{{ ( ansible_lvm['lvs'][ands_heketi_lv] | default(ands_empty_lv) )['vg'] }}"
+
+ands_openshift_set_hostname: false
+ands_openshift_set_public_hostname: "{{ (ands_openshift_set_hostname and (ands_openshift_public_network is defined)) | ternary(true, false) }}"
+ands_resolve_public_ip: false
+
+ands_cluster_domain: "{{ ansible_domain }}"
+ands_inner_domain: "{{ ands_cluster_domain }}"
+
+ands_default_ip: "{{ ansible_default_ipv4.address }}"
+ands_openshift_default_ip: "{{ ands_resolve_public_ip | default(false) | ternary(ands_default_ip, ands_none) }}"
+ands_openshift_default_hostname: "{{ (ands_hostname_template is defined) | ternary(ands_hostname_template ~ ands_host_id, ansible_hostname) }}"
+
+ands_inner_lb: false
+ands_inner_lb_id: 254
+ands_inner_lb_hostname: 'ands-lb'
+
+#ands_openshift_inner_interface:
+#ands_openshift_public_interface:

diff --git a/roles/ands_facts/tasks/find_interface_by_ip.yml b/roles/ands_facts/tasks/find_interface_by_ip.yml
new file mode 100644
index 0000000..ecfa3c3
--- /dev/null
+++ b/roles/ands_facts/tasks/find_interface_by_ip.yml
@@ -0,0 +1,20 @@
+- name: "Looking for interface holding {{ ip }}"
+  set_fact:
+    "{{ var }}": "{{ eth['device'] }}"
+  vars:
+    eth: "{{ hostvars[inventory_hostname]['ansible_' + item] | default({}) }}"
+    ipv4: "{{ eth['ipv4'] | default({}) }}"
+    q: "{{ eth | json_query('ipv4_secondaries[*].address') }}"
+    sec: "{{ ((q == ands_none) or (q == '')) | ternary([], q) }}"
+    ips: "{{ sec | union([ipv4.address]) }}"
+  when:
+    - eth['type'] is defined
+    - eth['ipv4'] is defined
+    - eth['device'] is defined
+    - eth['type'] == 'ether'
+    - ip in ips
+  with_items:
+    - "{{ hostvars[inventory_hostname]['ansible_interfaces'] }}"
+#  loop_control:
+#    label: "{{ item }}"
+#  no_log: true

diff --git a/roles/ands_facts/tasks/main.yml b/roles/ands_facts/tasks/main.yml
index cf995a0..6b28683 100644
--- a/roles/ands_facts/tasks/main.yml
+++ b/roles/ands_facts/tasks/main.yml
@@ -1,59 +1,14 @@
-- include_vars: dir="vars"
+---

-- name: Detect Heketi
-  set_fact: ands_storage_domains="{{ ands_storage_domains | union([ands_heketi_domain]) }}"
-  when:
-    - ands_configure_heketi
-    - ands_heketi_domain is defined
-    - ansible_lvm.lvs[ands_heketi_lv] is defined
+# The variables accessed through 'hostvars' should be set as facts
+# Here we set 'ands_storage_servers' and other variables
+- name: "Configuring storage facts"
+  include_tasks: "storage.yml"

-- name: Set some facts
-  set_fact:
-    ands_storage_servers: "{{ ands_storage_servers }}"
-
-- name: Set some facts
-  set_fact:
-    ands_data_vg: "{{ ands_data_vg }}"
-  when: ands_data_vg != ""
-
-- name: Set some facts
-  set_fact:
-    ands_data_lv: "{{ ands_data_lv }}"
-  when: ands_data_lv != ""
-
-- name: Set some facts
-  set_fact:
-    ands_heketi_vg: "{{ ands_heketi_vg }}"
-  when: ands_heketi_vg != ""
-
-- name: Set some facts
-  set_fact:
-    ands_heketi_lv: "{{ ands_heketi_lv }}"
-  when: ands_heketi_lv != ""
-
-- name: Set some facts
-  set_fact:
-    ands_data_dev: "/dev/mapper/{{ands_data_vg}}-{{ands_data_lv}}"
-  when:
-    - ands_data_vg != ""
-
- ands_data_lv != "" - -- name: set some facts - set_fact: - ands_heketi_dev: "/dev/mapper/{{ands_heketi_vg}}-{{ands_heketi_lv}}" - when: - - ands_heketi_vg != "" - - ands_heketi_lv != "" +# Here we set 'openshift_hostname', 'openshift_ip' and other variables +- name: "Configuring network facts" + include_tasks: "network.yml" -- name: set some facts +- name: "Confirm that ands facts are configured" set_fact: - glusterfs_devices: [ "{{ ands_heketi_dev }}" ] - when: - - ands_heketi_vg != "" - - ands_heketi_lv != "" - -- include_tasks: detect_data_path.yml - when: not ands_data_path is defined - -#- command: yum-complete-transaction --cleanup-only - + ands_facts_configured: true diff --git a/roles/ands_facts/tasks/network.yml b/roles/ands_facts/tasks/network.yml new file mode 100644 index 0000000..1d0248f --- /dev/null +++ b/roles/ands_facts/tasks/network.yml @@ -0,0 +1,49 @@ +- name: Set network facts + set_fact: + ands_cluster_domain: "{{ ands_cluster_domain }}" + ands_cluster_dot_domain: ".{{ ands_cluster_domain }}" + ands_inner_domain: "{{ ands_inner_domain }}" + ands_inner_dot_domain: "{{ (ands_inner_domain == ands_none) | ternary('', '.' ~ ands_inner_domain) }}" + ands_inner_lb_ip: "{{ ands_openshift_network | ipaddr(ands_inner_lb_id) | ipaddr('address') }}" + ands_inner_lb_hostname: "{{ ands_inner_lb_hostname }}" + ands_openshift_ip: "{{ ands_openshift_network | ipaddr(ands_host_id) | ipaddr('address') }}" + ands_openshift_hostname: "{{ ands_openshift_hostname | default(ands_openshift_set_hostname | ternary(ands_openshift_default_hostname, ands_none)) }}" + ands_openshift_public_ip: "{{ (ands_openshift_public_network is defined) | ternary( ands_openshift_public_network | ipaddr(ands_host_id) | ipaddr('address'), ands_openshift_default_ip) }}" + ands_openshift_public_hostname: "{{ ands_openshift_public_hostname | default(ands_openshift_set_public_hostname | ternary(ands_openshift_default_hostname, ands_none)) }}" + ands_storage_ip: "{{ ands_storage_network | default(ands_openshift_network) | ipaddr(ands_host_id) | ipaddr('address') }}" + ands_hostname_storage: "ands_storage{{ ands_host_id }}" + ands_hostname_openshift: "ands_openshift{{ ands_host_id }}" + +- name: Set more network facts + set_fact: + ands_openshift_public_fqdn: "{{ (ands_openshift_public_hostname == ands_none) | ternary(ands_none, ands_openshift_public_hostname ~ ands_cluster_dot_domain ) }}" + ands_openshift_fqdn: "{{ (ands_openshift_hostname == ands_none) | ternary(ands_none, ands_openshift_hostname ~ ands_inner_dot_domain ) }}" + ands_openshift_cluster_fqdn: "{{ ands_inner_lb | ternary(ands_inner_lb_hostname ~ ands_inner_dot_domain, ands_openshift_lb) }}" + +- name: "Detect inner network interface" + include_tasks: "find_interface_by_ip.yml" + vars: + var: "ands_openshift_inner_interface" + ip: "{{ ands_openshift_ip }}" + when: + - ands_openshift_inner_interface is not defined + +- name: "Detect public network interface" + include_tasks: "find_interface_by_ip.yml" + vars: + var: "ands_openshift_public_interface" + ip: "{{ (ands_openshift_public_ip == ands_none) | ternary(ands_default_ip, ands_openshift_public_ip) }}" + when: + - ands_openshift_public_interface is not defined + +- name: Set ipfailover interface + set_fact: + ands_ipfailover_interface: "{{ ands_openshift_public_interface }}" + when: ands_ipfailover_interface is not defined + +- name: Set ipfailover inner interface + set_fact: + ands_ipfailover_inner_interface: "{{ ands_openshift_inner_interface }}" + when: ands_ipfailover_inner_interface is not defined 
+
+#- debug: msg="{{ hostvars }}"

diff --git a/roles/ands_facts/tasks/storage.yml b/roles/ands_facts/tasks/storage.yml
new file mode 100644
index 0000000..cf995a0
--- /dev/null
+++ b/roles/ands_facts/tasks/storage.yml
@@ -0,0 +1,59 @@
+- include_vars: dir="vars"
+
+- name: Detect Heketi
+  set_fact: ands_storage_domains="{{ ands_storage_domains | union([ands_heketi_domain]) }}"
+  when:
+    - ands_configure_heketi
+    - ands_heketi_domain is defined
+    - ansible_lvm.lvs[ands_heketi_lv] is defined
+
+- name: Set some facts
+  set_fact:
+    ands_storage_servers: "{{ ands_storage_servers }}"
+
+- name: Set some facts
+  set_fact:
+    ands_data_vg: "{{ ands_data_vg }}"
+  when: ands_data_vg != ""
+
+- name: Set some facts
+  set_fact:
+    ands_data_lv: "{{ ands_data_lv }}"
+  when: ands_data_lv != ""
+
+- name: Set some facts
+  set_fact:
+    ands_heketi_vg: "{{ ands_heketi_vg }}"
+  when: ands_heketi_vg != ""
+
+- name: Set some facts
+  set_fact:
+    ands_heketi_lv: "{{ ands_heketi_lv }}"
+  when: ands_heketi_lv != ""
+
+- name: Set some facts
+  set_fact:
+    ands_data_dev: "/dev/mapper/{{ands_data_vg}}-{{ands_data_lv}}"
+  when:
+    - ands_data_vg != ""
+    - ands_data_lv != ""
+
+- name: set some facts
+  set_fact:
+    ands_heketi_dev: "/dev/mapper/{{ands_heketi_vg}}-{{ands_heketi_lv}}"
+  when:
+    - ands_heketi_vg != ""
+    - ands_heketi_lv != ""
+
+- name: set some facts
+  set_fact:
+    glusterfs_devices: [ "{{ ands_heketi_dev }}" ]
+  when:
+    - ands_heketi_vg != ""
+    - ands_heketi_lv != ""
+
+- include_tasks: detect_data_path.yml
+  when: not ands_data_path is defined
+
+#- command: yum-complete-transaction --cleanup-only
+

diff --git a/roles/ands_network/README b/roles/ands_network/README
new file mode 100644
index 0000000..dfd029a
--- /dev/null
+++ b/roles/ands_network/README
@@ -0,0 +1,8 @@
+ We need to stop keepalived before provisioning OpenShift and scaling the cluster. This will, however,
+ prevent nodes from communicating with masters. Therefore, we add the IP of the first configured master node to
+ /etc/hosts.
+
+ We do the same if for some reason the routing is currently off, but we still want to provision OpenShift
+ projects. Of course, we don't need to turn off keepalived in this case, just temporarily add the IP of the
+ first master.
+
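To make the README concrete: after the common tasks below run, a master's /etc/hosts is expected to hold entries along these lines (illustrative values only, borrowed from the certificate report above; the actual IPs and names come from ands_facts):

    192.168.213.1   ipeshift1.ipe.kit.edu ipeshift1     # openshift ip + fqdn of each node
    192.168.212.1   ands_storage1                       # storage-network alias
    127.0.0.1       openshift.suren.me                  # masters reach the Master API locally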
diff --git a/roles/ands_network/tasks/common.yml b/roles/ands_network/tasks/common.yml
new file mode 100644
index 0000000..384029f
--- /dev/null
+++ b/roles/ands_network/tasks/common.yml
@@ -0,0 +1,49 @@
+#- name: Remove obsolete hostnames from /etc/hosts
+#  lineinfile: dest="/etc/hosts" regexp="{{ hostvars[item]['openshift_hostname'] }}" state="absent"
+#  when:
+#    - hostvars[item]['openshift_hostname'] | default(ands_none) != ands_none
+#    - hostvars[item]['ands_facts_configured'] is defined
+#  with_inventory_hostnames:
+#    - nodes
+#    - new_nodes
+
+
+# This will not work properly unless 'ands_facts' has been executed on all nodes.
+# This is checked by evaluating whether 'ands_openshift_fqdn' is defined.
+- name: Configure all cluster hostnames in /etc/hosts
+  lineinfile: dest="/etc/hosts" line="{{ ip }} {{ fqdn }} {{ hostname }}" regexp="{{ fqdn }}" state="present"
+  when:
+    - hostvars[item]['ands_openshift_fqdn'] | default(ands_none) != ands_none
+    - hostvars[item]['ands_facts_configured'] is defined
+  vars:
+    ip: "{{ hostvars[item]['ands_openshift_ip'] }}"
+    fqdn: "{{ hostvars[item]['ands_openshift_fqdn'] }}"
+    hostname: "{{ fqdn.split('.')[0] }}"
+  with_inventory_hostnames:
+    - nodes
+    - new_nodes
+
+- name: Configure all storage ips in /etc/hosts
+  lineinfile: dest="/etc/hosts" line="{{ ip }} {{ hostname }}" regexp="{{ hostname }}" state="present"
+  when:
+    - hostvars[item]['ands_storage_network'] | default(ands_none) != ands_none
+    - hostvars[item]['ands_facts_configured'] is defined
+  vars:
+    ip: "{{ hostvars[item]['ands_storage_ip'] }}"
+    hostname: "{{ hostvars[item]['ands_hostname_storage'] }}"
+  with_inventory_hostnames:
+    - storage_nodes
+    - new_storage_nodes
+
+
+- name: Provision /etc/hosts to ensure that all master servers access the Master API on the loopback device
+  lineinfile: dest="/etc/hosts" line="127.0.0.1 {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="present"
+  when: ('masters' in group_names or 'new_masters' in group_names)
+  register: result
+
+- name: Provision /etc/hosts to ensure that all master servers access the Master API on the loopback device
+  lineinfile: dest="/etc/hosts" line="{{ ands_inner_lb_ip }} {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="present"
+  when: (result | skipped) and (ands_inner_lb | default(false))
+
+- name: Register openshift_dns_ip in /etc/hosts
+  lineinfile: dest="/etc/hosts" line="{{ openshift_dns_ip }} openshift_dns_ip" regexp="openshift_dns_ip$" state="present"
+

diff --git a/roles/ands_network/tasks/install_post.yml b/roles/ands_network/tasks/install_post.yml
new file mode 100644
index 0000000..0bfef34
--- /dev/null
+++ b/roles/ands_network/tasks/install_post.yml
@@ -0,0 +1,9 @@
+- name: Start keepalived
+  service: name=keepalived state=started enabled=yes
+  when: ('masters' in group_names) or ('new_masters' in group_names)
+
+- name: Provision /etc/hosts to ensure that all hosts reach the master servers appropriately
+  lineinfile: dest="/etc/hosts" line="{{ ands_inner_lb_ip | default('') }} {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="{{ state }}"
+  when: ('masters' not in group_names and 'new_masters' not in group_names)
+  vars:
+    state: "{{ ands_inner_lb | default(false) | ternary('present', 'absent') }}"
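In terms of the opts.sh actions documented earlier, these install_pre/install_post hooks bracket the scale-up playbooks, so adding nodes would run roughly as follows (a sketch, assuming opts.sh dispatches each action to the matching playbook):

    ./opts.sh prepare            # common setup on the new hosts (ands-prepare.yml)
    ./opts.sh openshift-nodes    # install_pre -> openshift-node scaleup -> install_post
    ./opts.sh configure          # storage, users, OpenVPN tunnels
    # or, as a single complete action:
    ./opts.sh nodes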
diff --git a/roles/ands_network/tasks/install_pre.yml b/roles/ands_network/tasks/install_pre.yml
new file mode 100644
index 0000000..f555d1b
--- /dev/null
+++ b/roles/ands_network/tasks/install_pre.yml
@@ -0,0 +1,15 @@
+- name: Temporarily provision /etc/hosts with the first master's IP.
+  lineinfile: dest="/etc/hosts" line="{{ ands_openshift_network | ipaddr(node_id) | ipaddr('address') }} {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="present"
+  when: ('masters' not in group_names)
+  vars:
+    node_id: "{{ hostvars[groups['masters'][0]]['ands_host_id'] }}"
+
+- name: Check if keepalived is installed
+  stat: path="/etc/sysconfig/keepalived"
+  register: keepalived_result
+
+- name: Stop keepalived
+  service: name=keepalived state=stopped
+  when:
+    - keepalived_result.stat.exists
+    - ('masters' in group_names) or ('new_masters' in group_names)

diff --git a/roles/ands_network/tasks/main.yml b/roles/ands_network/tasks/main.yml
new file mode 100644
index 0000000..0bc913a
--- /dev/null
+++ b/roles/ands_network/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: "Configuring network"
+  include_tasks: "{{ action | default('common') }}.yml"

diff --git a/roles/ands_network/tasks/maintain.yml b/roles/ands_network/tasks/maintain.yml
new file mode 100644
index 0000000..a7af597
--- /dev/null
+++ b/roles/ands_network/tasks/maintain.yml
@@ -0,0 +1,9 @@
+- name: Ensure keepalived is running on master nodes
+  service: name=keepalived state=started enabled=yes
+  when: ('masters' in group_names)
+
+- name: Provision /etc/hosts to ensure that all hosts reach the master servers appropriately
+  lineinfile: dest="/etc/hosts" line="{{ ands_inner_lb_ip | default('') }} {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="{{ state }}"
+  when: ('masters' not in group_names and 'new_masters' not in group_names)
+  vars:
+    state: "{{ ands_inner_lb | default(false) | ternary('present', 'absent') }}"

diff --git a/roles/ands_openshift/defaults/main.yml b/roles/ands_openshift/defaults/main.yml
index b97b584..d279345 100644
--- a/roles/ands_openshift/defaults/main.yml
+++ b/roles/ands_openshift/defaults/main.yml
@@ -1,4 +1,4 @@
-openshift_common_subroles: "{{ [ 'hostnames', 'users', 'security', 'storage' ] }}"
+openshift_common_subroles: "{{ [ 'users', 'security', 'storage' ] }}"
 openshift_heketi_subroles: "{{ [ 'ssh', 'heketi' ] }}"
 openshift_all_subroles: "{{ ands_configure_heketi | default(False) | ternary(openshift_common_subroles + openshift_heketi_subroles, openshift_common_subroles) }}"

diff --git a/roles/ands_openshift/tasks/hostnames.yml b/roles/ands_openshift/tasks/hostnames.yml
deleted file mode 100644
index e489a8c..0000000
--- a/roles/ands_openshift/tasks/hostnames.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-#- name: Remove obsolte hostnames from /etc/hosts
-#  lineinfile: dest="/etc/hosts" regexp="{{ hostvars[item]['openshift_hostname'] }}" state="absent"
-#  with_inventory_hostnames:
-#    - nodes
-
-
-- name: Configure all cluster hostnames in /etc/hosts
-  lineinfile: dest="/etc/hosts" line="{{ hostvars[item]['openshift_ip'] }} {{ hostvars[item]['openshift_public_hostname'] }} {{ hostvars[item]['openshift_hostname'] }}" regexp="{{ hostvars[item]['openshift_hostname'] }}" state="present"
-  with_inventory_hostnames:
-    - nodes
-
-- name: Provision /etc/hosts to ensure that all masters servers are accessing Master API on loopback device
-  lineinfile: dest="/etc/hosts" line="127.0.0.1 {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="present"
-  when: "'masters' in group_names"

diff --git a/roles/ands_storage/tasks/detect_device.yml b/roles/ands_storage/tasks/detect_device.yml
index 0fb9764..3467371 100644
--- a/roles/ands_storage/tasks/detect_device.yml
+++
b/roles/ands_storage/tasks/detect_device.yml @@ -1,3 +1,8 @@ +#- name: find if ands data is already mounted +# set_fact: ands_data_is_mounted=true +# with_items: "{{ ansible_mounts }}" +# when: item.mount == ands_data_path + - name: find large block devices set_fact: ands_data_device="/dev/{{ item.key }}" # debug: msg="{{ item.key }} - {{ (item.value.sectors | int) * (item.value.sectorsize | int) / 1024 / 1024 / 1024 }} GB" diff --git a/roles/ands_storage/tasks/main.yml b/roles/ands_storage/tasks/main.yml index 9318f88..43d4692 100644 --- a/roles/ands_storage/tasks/main.yml +++ b/roles/ands_storage/tasks/main.yml @@ -10,11 +10,15 @@ - name: Create Ands VG lvg: vg="{{ ands_data_vg }}" pvs="{{ ands_data_device }}" - when: ands_data_device is defined + when: + - ands_data_device is defined + - ansible_lvm.vgs[ands_data_vg] is not defined - name: Create Heketi VG lvg: vg="{{ ands_heketi_vg }}" pvs="{{ ands_heketi_device }}" - when: ands_heketi_device is defined + when: + - ands_heketi_device is defined + - ansible_lvm.vgs[ands_heketi_vg] is not defined - name: Check if Heketi Volume already exists stat: path="/dev/{{ ands_heketi_vg }}/{{ ands_heketi_lv }}" diff --git a/roles/ands_vagrant_vm/templates/Vagrantfile.j2 b/roles/ands_vagrant_vm/templates/Vagrantfile.j2 index b044e2e..386ba85 100644 --- a/roles/ands_vagrant_vm/templates/Vagrantfile.j2 +++ b/roles/ands_vagrant_vm/templates/Vagrantfile.j2 @@ -12,7 +12,7 @@ Vagrant.configure("2") do |config| (1..{{ vagrant_hosts }}).each do |i| config.vm.define "{{ vagrant_hostname_template }}#{i}" do |node| node.vm.network "public_network", nm_controlled: "yes", bridge: "br0", mac: "080027{{ macid }}02#{i}", ip: "{{ public_net }}.#{i}", type: "dhcp" - node.vm.network "private_network", nm_controlled: "yes", mac: "080027{{ macid }}12#{i}", ip: "{{ storage_net }}.#{i}", name: "vboxnet0", type: "static" + node.vm.network "private_network", nm_controlled: "yes", mac: "080027{{ macid }}12#{i}", ip: "{{ net }}.#{i}", name: "vboxnet0", type: "static" node.vm.box = "centos/7" node.disksize.size = "80 GB" node.vm.hostname = "{{ vagrant_hostname_template }}#{i}.ipe.kit.edu" @@ -26,8 +26,9 @@ Vagrant.configure("2") do |config| node.vm.provision "shell", run: "always", inline: "( ip addr show dev eth2 | grep {{ netid }}.#{i} ) || ip addr add 192.168.{{ netid }}.#{i}/24 dev eth2" node.vm.provision "shell", run: "always", inline: "chmod +r /etc/sysconfig/network-scripts/ifcfg-eth*" node.vm.provision "shell", run: "always", inline: "chcon --reference /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth*" - - node.vm.provision "shell", run: "always", inline: "ip route del default dev eth0" +# node.vm.provision "shell", run: "always", inline: "nmcli con down 'System eth0'; nmcli con up 'System eth0'" + node.vm.provision "shell", run: "always", inline: "ip route del default dev eth0 &> /dev/null ; error=$?" + node.vm.provision "shell", run: "always", inline: "DEVICE_IFACE=eth1 /etc/NetworkManager/dispatcher.d/99-origin-dns.sh eth1 up &> /dev/null; error=$?" 
        node.vm.provision "shell" do |s|
          ssh_pub_key = File.readlines("authorized_keys").first.strip

diff --git a/roles/ands_vagrant_vmconf/tasks/main.yml b/roles/ands_vagrant_vmconf/tasks/main.yml
index f52a52d..b130aa4 100644
--- a/roles/ands_vagrant_vmconf/tasks/main.yml
+++ b/roles/ands_vagrant_vmconf/tasks/main.yml
@@ -23,6 +23,4 @@
 # We just need networkmanager running
 #  - name: Bypass absent NM
 #    copy: remote_src="yes" src="/etc/resolv.conf" dest="/etc/origin/node/resolv.conf"
-
-  - name: Update CentOS
-    yum: name=* state=latest update_cache=yes
+

diff --git a/roles/common/default/main.yml b/roles/common/default/main.yml
new file mode 100644
index 0000000..d355d15
--- /dev/null
+++ b/roles/common/default/main.yml
@@ -0,0 +1 @@
+os_update: "{{ ands_update | default(false) }}"
\ No newline at end of file

diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml
index 9bd820a..fdd7246 100644
--- a/roles/common/tasks/main.yml
+++ b/roles/common/tasks/main.yml
@@ -14,6 +14,7 @@
 # Seems we need iptables-services at least temporary...
 - name: Ensure all required packages are installed
   package: name={{item}} state=present
+  register: result
   with_items:
     - mc
     - bzr
@@ -28,6 +29,12 @@
     - PyYAML
     - python-rhsm-certificates
     - glusterfs-fuse
+    - telnet
+
+# We always update on first install and if requested
+- name: Update CentOS
+  yum: name=* state=latest update_cache=yes
+  when: (result | changed) or (os_update | default(false))

 #- name: Add NodeJS required by a few used Ansible extensions
 #  package: name={{item}} state=present

diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index a7bd700..0d040a9 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -27,14 +27,16 @@
   lvol: vg="{{ ansible_lvm['lvs'][docker_lv]['vg'] }}" lv="docker_lv" size="{{ docker_volume_size }}"
   when: docker_volume_size is defined

-- name: Limit size of container log files
-  ghetto_json:
-    path: "/etc/docker/daemon.json"
-    log-driver: "json-file"
-    log-opts.max-size: "{{ docker_max_log_size }}"
-    log-opts.max-file: "{{ docker_max_log_files }}"
-  notify:
-    - restart docker
+# By default the systemd log driver is installed. It is removed during OpenShift installation, but is still present during the prepare stage.
+# The parameters for docker can be set through OpenShift and currently are moved there.
+#- name: Limit size of container log files +# ghetto_json: +# path: "/etc/docker/daemon.json" +# log-driver: "json-file" +# log-opts.max-size: "{{ docker_max_log_size }}" +# log-opts.max-file: "{{ docker_max_log_files }}" +# notify: +# - restart docker - name: start docker service: name="docker" enabled=yes state=started diff --git a/roles/docker/tasks/storage.yml b/roles/docker/tasks/storage.yml index 595979c..5a5c858 100644 --- a/roles/docker/tasks/storage.yml +++ b/roles/docker/tasks/storage.yml @@ -20,7 +20,7 @@ - set_fact: docker_storage_config="VG={{ docker_storage_vg }} AUTO_EXTEND_POOL=true" - set_fact: docker_storage_config="{{ docker_storage_config }} DEVS={{ docker_storage_device }}" - when: ( docker_storage_device is defined ) and ( not ansible_lvm.vgs.{{ docker_storage_vg }} is defined ) + when: ( docker_storage_device is defined ) and ( ansible_lvm.vgs[docker_storage_vg] is not defined ) - name: stop docker service: name="docker" state="stopped" diff --git a/roles/ganesha/files/ganesha.conf b/roles/ganesha/files/ganesha.conf new file mode 100644 index 0000000..2bfc114 --- /dev/null +++ b/roles/ganesha/files/ganesha.conf @@ -0,0 +1 @@ +%include "/etc/ganesha/exports/export.openshift.conf" diff --git a/roles/ganesha/tasks/main.yml b/roles/ganesha/tasks/main.yml new file mode 100644 index 0000000..032631b --- /dev/null +++ b/roles/ganesha/tasks/main.yml @@ -0,0 +1,30 @@ +- name: Ensure GlusterFS repositories are present + yum: name="centos-release-gluster{{ glusterfs_version }}" state=present + +- name: Ensure Ganesha is installed + yum: name={{item}} state=present + with_items: + - nfs-ganesha-gluster + - nfs-ganesha + +- name: Change logdir group to prevent selinux problems + file: dest="/var/log/ganesha" owner="ganesha" group="root" mode="0775" state="directory" + +- name: Copy default Ganesha configuration + copy: src="ganesha.conf" dest="/etc/ganesha/ganesha.conf" owner="root" group="root" mode="0644" + +- name: Configure Ganesha HA + template: src="ganesha-ha.conf.j2" dest="/etc/ganesha/ganesha-ha.conf" owner=root group=root mode="0644" + +- name: Configure firewalld + firewalld: service="{{ item }}" state="enabled" permanent="true" immediate="true" + with_items: + - nfs +# - mountd +# - rpc-bind + +- name: Reload firewalld rules + shell: firewall-cmd --reload + +- name: Enable and start ganesha service + service: name="nfs-ganesha" state=started enabled=yes diff --git a/roles/ganesha/templates/ganesha-ha.conf.j2 b/roles/ganesha/templates/ganesha-ha.conf.j2 new file mode 100644 index 0000000..bdb2e0c --- /dev/null +++ b/roles/ganesha/templates/ganesha-ha.conf.j2 @@ -0,0 +1,12 @@ +{% set members = groups['masters'] | union(groups['new_masters'] | default([])) | map('extract', hostvars, 'ands_hostname_storage') | list %} +{% set vips = ands_ipfailover_vips | default([]) %} +{% set n_vips = vips | length %} +{% if n_vips > 0 %} +{% set nodes = members[0:n_vips] %} +HA_NAME="openshift_nfs" +#HA_VOL_SERVER="{{ hostvars[groups['masters'][0]]['ands_hostname_storage'] }}" +HA_CLUSTER_NODES="{{ nodes | join(',') }}" +{% for node in nodes %} +VIP_{{ node }}="{{ vips[loop.index - 1] }}" +{% endfor %} +{% endif %} diff --git a/roles/glusterfs/defaults/main.yml b/roles/glusterfs/defaults/main.yml index 9587a9b..700838d 100644 --- a/roles/glusterfs/defaults/main.yml +++ b/roles/glusterfs/defaults/main.yml @@ -1,5 +1,5 @@ --- -glusterfs_version: 39 +glusterfs_version: 312 glusterfs_transport: rdma glusterfs_network: "{{ ands_storage_network }}" diff --git 
a/roles/glusterfs/tasks/common.yml b/roles/glusterfs/tasks/common.yml index 5e8e3b6..67fb815 100644 --- a/roles/glusterfs/tasks/common.yml +++ b/roles/glusterfs/tasks/common.yml @@ -8,6 +8,7 @@ - glusterfs-cli - glusterfs-fuse - glusterfs-rdma + - heketi-client - libsemanage-python - name: Allow fuse in SELinux configuration diff --git a/roles/glusterfs/tasks/create_domain.yml b/roles/glusterfs/tasks/create_domain.yml index 8f8042b..76623f2 100644 --- a/roles/glusterfs/tasks/create_domain.yml +++ b/roles/glusterfs/tasks/create_domain.yml @@ -1,8 +1,16 @@ --- +- name: Get list of existing gluster volumes + shell: "gluster volume info" + changed_when: false + register: gv_results + + - name: Configure volumes include_tasks: create_volume.yml with_dict: "{{ domain.volumes }}" + when: volume_string not in gv_results.stdout_lines vars: + volume_string: "Volume Name: {{ volume.key }}" domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}" loop_control: loop_var: volume diff --git a/roles/glusterfs/tasks/ganesha.yml b/roles/glusterfs/tasks/ganesha.yml new file mode 100644 index 0000000..61d151a --- /dev/null +++ b/roles/glusterfs/tasks/ganesha.yml @@ -0,0 +1,6 @@ +- name: Create /etc/ganesha/exports + file: dest="/etc/ganesha/exports" owner="root" group="root" mode="0755" state="directory" + +- name: Configure Ganesha NFS exports + template: src="export.openshift.conf.j2" dest="/etc/ganesha/exports/export.openshift.conf" owner=root group=root mode="0644" + diff --git a/roles/glusterfs/tasks/install.yml b/roles/glusterfs/tasks/install.yml new file mode 100644 index 0000000..d7ee766 --- /dev/null +++ b/roles/glusterfs/tasks/install.yml @@ -0,0 +1,34 @@ +--- +- name: Install GlusterFS Common Software + include_tasks: common.yml + when: + - "'software' in glusterfs_subroles" + +- name: Install GlusterFS client + include_tasks: setup-client.yml + when: + - "'software' in glusterfs_subroles" + - "'ands_storage_servers' not in group_names" + +- name: Install GlusterFS OpenShift Server + include_tasks: setup-openshift-server.yml + when: + - "'software' in glusterfs_subroles" + - "'ands_storage_servers' in group_names" + - "'glusterfs' in group_names" + +- name: Install GlusterFS External Server + include_tasks: setup-external-server.yml + when: + - "'software' in glusterfs_subroles" + - "'ands_storage_servers' in group_names" + - "'glusterfs' not in group_names" + +- name: Configure gluster peers (on first host) + shell: gluster peer probe {{item}} + run_once: true + with_items: "{{ glusterfs_servers }}" + +- include_tasks: volumes.yml + when: + - "'volumes' in glusterfs_subroles" diff --git a/roles/glusterfs/tasks/iterate_domains.yml b/roles/glusterfs/tasks/iterate_domains.yml new file mode 100644 index 0000000..e061652 --- /dev/null +++ b/roles/glusterfs/tasks/iterate_domains.yml @@ -0,0 +1,7 @@ +- name: Process all storage domains + include_tasks: "iterate_volumes.yml" + run_once: true + delegate_to: "{{ groups[domain.servers][0] }}" + with_items: "{{ glusterfs_domains }}" + loop_control: + loop_var: domain diff --git a/roles/glusterfs/tasks/iterate_volumes.yml b/roles/glusterfs/tasks/iterate_volumes.yml new file mode 100644 index 0000000..8f61116 --- /dev/null +++ b/roles/glusterfs/tasks/iterate_volumes.yml @@ -0,0 +1,12 @@ +--- +- name: Iterate volumes + include_tasks: "{{ action }}.yml" + with_dict: "{{ domain.volumes }}" + vars: + name: "{{ volume.key }}" + path: "{{ volume.value.mount }}" + server_group: "{{ domain.servers }}" + domain_servers: 
"{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}" + when: volume.value.mount is defined + loop_control: + loop_var: volume diff --git a/roles/glusterfs/tasks/main.yml b/roles/glusterfs/tasks/main.yml index d7ee766..a02c1a1 100644 --- a/roles/glusterfs/tasks/main.yml +++ b/roles/glusterfs/tasks/main.yml @@ -1,34 +1,3 @@ --- -- name: Install GlusterFS Common Software - include_tasks: common.yml - when: - - "'software' in glusterfs_subroles" - -- name: Install GlusterFS client - include_tasks: setup-client.yml - when: - - "'software' in glusterfs_subroles" - - "'ands_storage_servers' not in group_names" - -- name: Install GlusterFS OpenShift Server - include_tasks: setup-openshift-server.yml - when: - - "'software' in glusterfs_subroles" - - "'ands_storage_servers' in group_names" - - "'glusterfs' in group_names" - -- name: Install GlusterFS External Server - include_tasks: setup-external-server.yml - when: - - "'software' in glusterfs_subroles" - - "'ands_storage_servers' in group_names" - - "'glusterfs' not in group_names" - -- name: Configure gluster peers (on first host) - shell: gluster peer probe {{item}} - run_once: true - with_items: "{{ glusterfs_servers }}" - -- include_tasks: volumes.yml - when: - - "'volumes' in glusterfs_subroles" +- name: "Configuring Gluster storage subsystem" + include_tasks: "{{ action | default('install') }}.yml" diff --git a/roles/glusterfs/tasks/migrate.yml b/roles/glusterfs/tasks/migrate.yml new file mode 100644 index 0000000..3ef7917 --- /dev/null +++ b/roles/glusterfs/tasks/migrate.yml @@ -0,0 +1,34 @@ +- debug: msg="Migrating" + +#- name: Detect if source host {{ glusterfs_migrate_from }} is failed or running +# wait_for: host={{ glusterfs_migrate_from }} port=24007 timeout=1 +# register: srcres +# changed_when: srcres | failed +# failed_when: false + +#- set_fact: +# glusterfs_migrate_failed: "{{ srcres | changed }}" + +- set_fact: + glusterfs_migrate_failed: "1" + +- name: Analyze current configuration + include_tasks: iterate_domains.yml + vars: + action: "migrate_volume" + migrate_action: "migrate_inform" + migrate_from: "{{ glusterfs_migrate_from }}" + migrate_to: "{{ glusterfs_migrate_to }}" + migrate_failed: "{{ glusterfs_migrate_failed }}" + +- name: Exterminate mankind + pause: prompt='Please confirm if you want to proceed. Press Ctrl+c and then "a" to abort' + +- name: Analyze current configuration + include_tasks: iterate_domains.yml + vars: + action: "migrate_volume" + migrate_action: "{{ migrate_failed | ternary('migrate_failed_brick', 'migrate_live_brick') }}" + migrate_from: "{{ glusterfs_migrate_from }}" + migrate_to: "{{ glusterfs_migrate_to }}" + migrate_failed: "{{ glusterfs_migrate_failed }}" diff --git a/roles/glusterfs/tasks/migrate_failed_brick.yml b/roles/glusterfs/tasks/migrate_failed_brick.yml new file mode 100644 index 0000000..3490c82 --- /dev/null +++ b/roles/glusterfs/tasks/migrate_failed_brick.yml @@ -0,0 +1,10 @@ +- name: "Volume {{ vol }} - Migrating {{ src }} to {{ dst }}" + command: "gluster volume replace-brick {{ vol }} {{ src }} {{ dst }} commit force" + +# This is only working within containers with 'oc rsh'. But actually there are auto-healing daemons running. 
diff --git a/roles/glusterfs/tasks/migrate_inform.yml b/roles/glusterfs/tasks/migrate_inform.yml
new file mode 100644
index 0000000..912d359
--- /dev/null
+++ b/roles/glusterfs/tasks/migrate_inform.yml
@@ -0,0 +1 @@
+- warn: msg="Volume {{ vol }} - Migrating {{ src }} to {{ dst }}"
diff --git a/roles/glusterfs/tasks/migrate_volume.yml b/roles/glusterfs/tasks/migrate_volume.yml
new file mode 100644
index 0000000..f9edeac
--- /dev/null
+++ b/roles/glusterfs/tasks/migrate_volume.yml
@@ -0,0 +1,17 @@
+- name: "Analyze bricks of {{ volume.key }}"
+  shell: |
+    gluster volume info '{{ volume.key }}' | grep -P 'Brick\d+:' | awk '{ print $2 }'
+  register: gvires
+
+- name: "Execute configured {{ migrate_action }} on volume {{ volume.key }} with bricks {{ src }} and {{ dst }}"
+  include_tasks: "{{ migrate_action }}.yml"
+  vars:
+    bricks: "{{ gvires.stdout_lines | list }}"
+    servers: "{{ bricks | map('regex_replace', ':.*$', '') | list }}"
+    brick: "{{ servers.index(migrate_from) }}"
+    src: "{{ bricks[brick | int] | default('') }}"
+    dst: "{{ bricks[brick | int] | default('') | regex_replace('^' ~ migrate_from, migrate_to) }}"
+    vol: "{{ volume.key }}"
+  when:
+    - migrate_from in servers
+    - brick is defined
diff --git a/roles/glusterfs/tasks/volumes.yml b/roles/glusterfs/tasks/volumes.yml
index c4d49ac..1a85378 100644
--- a/roles/glusterfs/tasks/volumes.yml
+++ b/roles/glusterfs/tasks/volumes.yml
@@ -8,7 +8,7 @@
 - name: Mount volume domains
   include_tasks: mount_domain.yml
-  when: ( domain.clients | default("---") ) in group_names
+  when: domain.clients | default([]) | intersect(group_names) | length > 0
   with_items: "{{ glusterfs_domains }}"
   loop_control:
     loop_var: domain
diff --git a/roles/glusterfs/templates/export.openshift.conf.j2 b/roles/glusterfs/templates/export.openshift.conf.j2
new file mode 100644
index 0000000..b2c547f
--- /dev/null
+++ b/roles/glusterfs/templates/export.openshift.conf.j2
@@ -0,0 +1,44 @@
+{# Export ids are derived from the loop indices; a plain counter would not survive Jinja block scoping #}
+{% for domain in glusterfs_domains %}
+{% set domain_loop = loop %}
+{% for name, vol in domain.volumes.items() %}
+{% if vol.nfs_clients is defined %}
+{% set nfs = vol.nfs_clients %}
+EXPORT {
+    Export_Id = {{ (domain_loop.index0 * 100) + loop.index }};
+    Path = "/{{ name }}";
+    FSAL {
+        name = GLUSTER;
+        hostname = "localhost";
+        volume = "{{ name }}";
+    }
+    Access_type = RW;
+    Disable_ACL = true;
+    Squash = "No_root_squash";
+    Pseudo = "/{{ name }}";
+    Protocols = "3", "4";
+    Transports = "UDP", "TCP";
+    SecType = "sys";
+
+{% if nfs.rw is defined %}
+{% for net in nfs.rw %}
+    CLIENT {
+        clients = {{ net }};
+        Access_type = RW;
+    }
+{% endfor %}
+{% endif %}
+
+{% if nfs.ro is defined %}
+{% for net in nfs.ro %}
+    CLIENT {
+        clients = {{ net }};
+        Access_type = RO;
+    }
+{% endfor %}
+{% endif %}
+}
+
+{% endif %}
+{% endfor %}
+{% endfor %}
diff --git a/roles/keepalived/defaults/main.yml b/roles/keepalived/defaults/main.yml
index a7087b0..3302b85 100644
--- a/roles/keepalived/defaults/main.yml
+++ b/roles/keepalived/defaults/main.yml
@@ -1,6 +1,9 @@
 ---
-keepalived_vips: "{{ ands_ipfailover_vips | default([]) }}"
-keepalived_iface: "{{ ands_ipfailover_interface | default('eth0') }}"
+keepalived_vrrp:
+  - vips: "{{ ands_ipfailover_vips | default([]) }}"
+    iface: "{{ ands_ipfailover_interface | default('eth0') }}"
+  - vips: "{{ ands_inner_lb | ternary([ands_inner_lb_ip], []) }}"
+    iface: "{{ ands_ipfailover_inner_interface }}"
 
 keepalived_master_prio: 80
 keepalived_backup_prio: 20
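The keepalived_vrrp list introduced above allows several VRRP groups on separate
interfaces. A minimal sketch of the expected structure, with made-up interface
names and addresses:

    keepalived_vrrp:
      - vips: [ "192.168.26.60", "192.168.26.61" ]    # public service VIPs (example values)
        iface: "eth0"
      - vips: [ "192.168.12.60" ]                     # inner load-balancer VIP (example value)
        iface: "eth1"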
diff --git a/roles/keepalived/tasks/main.yml b/roles/keepalived/tasks/main.yml
index adedcdc..08835b8 100644
--- a/roles/keepalived/tasks/main.yml
+++ b/roles/keepalived/tasks/main.yml
@@ -20,3 +20,4 @@
 - name: Start keepalived
   service: name=keepalived state=started enabled=yes
   tags: keepalived
+
diff --git a/roles/keepalived/templates/keepalived.conf.j2 b/roles/keepalived/templates/keepalived.conf.j2
index 8d9a580..6df5eab 100644
--- a/roles/keepalived/templates/keepalived.conf.j2
+++ b/roles/keepalived/templates/keepalived.conf.j2
@@ -6,20 +6,22 @@ vrrp_script track {
     interval {{ keepalived_check_interval }}
 }
 
-{% for vips in keepalived_vips %}
-{% set id = ( vips | ipaddr('address') | regex_replace('^.*\.', '') ) %}
+{% for vrrp in keepalived_vrrp %}
+{% set vrrp_id = loop.index %}
+{% for vips in vrrp.vips %}
+{% set id = ( vips | ipaddr('address') | regex_replace('^.*\.', '') ) %}
 
-vrrp_instance VI_{{ loop.index }} {
+vrrp_instance VI_{{vrrp_id}}_{{ loop.index }} {
 
-    virtual_router_id {{ id }}
+    virtual_router_id {{ (vrrp_id - 1) * 32 + (loop.index) }}
 
     state {{ (( ( loop.index - 1) % (keepalived_num_nodes | int) ) == (keepalived_node_id | int) ) | ternary('MASTER', 'BACKUP') }}
-    state {{ (( ( loop.index - 1) % (keepalived_num_nodes | int) ) == (keepalived_node_id | int) ) | ternary(keepalived_master_prio, keepalived_backup_prio) }}
+    priority {{ (( ( loop.index - 1) % (keepalived_num_nodes | int) ) == (keepalived_node_id | int) ) | ternary(keepalived_master_prio, keepalived_backup_prio) }}
 
-    interface {{ keepalived_iface }}
+    interface {{ vrrp.iface }}
 
     virtual_ipaddress {
-        {{ vips }} dev {{ keepalived_iface }}
+        {{ vips }} dev {{ vrrp.iface }}
     }
 
     advert_int 1
@@ -33,4 +35,5 @@ vrrp_instance VI_{{ loop.index }} {
         track
     }
 }
+{% endfor %}
 {% endfor %}
diff --git a/scripts/gluster.sh b/scripts/gluster.sh
new file mode 100755
index 0000000..02a0a3f
--- /dev/null
+++ b/scripts/gluster.sh
@@ -0,0 +1,85 @@
+#! /bin/bash
+
+. opts.sh
+
+[ $? -ne 0 -o -z "$gpod" ] && { echo "No storage pods are running..." ; exit 1 ; }
+[ -z "$1" ] && { echo "Usage: $0 <action> [volume|all] [args]" ; exit 1 ; }
+action=$1
+shift
+
+
+function info {
+    vol=$1
+
+    status=$(gluster volume info "$vol" | grep -P 'Status' | awk '{ print $2 }' | tr -d '\r\n')
+    bricks=$(gluster volume info "$vol" | grep -P 'Number of Bricks' | awk '{ print $NF }' | tr -d '\r\n')
+    avail=$(gluster volume status "$vol" detail | grep Brick | wc -l)
+    online=$(gluster volume status "$vol" detail | grep Online | grep Y | wc -l)
+
+    echo "Volume $vol: $status (Bricks: $bricks, Available: $avail, Online: $online)"
+}
+
+function heal {
+    vol=$1
+
+    distributed=0
+    gluster volume info "$vol" | grep "Type:" | grep -i "Distribute" &> /dev/null
+    [ $? -eq 0 ] && distributed=1
+
+    echo "Healing volume $vol"
+    echo "-------------------"
+    gluster volume heal "$vol" full
+    gluster volume heal "$vol" info
+
+    if [ $distributed -eq 1 ]; then
+        echo "Rebalancing distributed volume $vol"
+        gluster volume rebalance "$vol" fix-layout start
+    fi
+
+
+    gluster volume status "$vol"
+}
+
+function migrate {
+    vol=$1
+    src=$2
+    dst=$3
+
+    [ -z "$src" -o -z "$dst" ] && { echo "Source and destination servers are required" ; exit 1 ; }
+
+    src_brick=$(gluster volume info $vol | grep -P '^Brick\d+:' | awk '{ print $2 }' | grep -P "^$src" | tr -d '\r\n' )
+    dst_brick=${src_brick/$src/$dst}
+
+    [ -z "$src_brick" -o -z "$dst_brick" ] && return 0
+
+    echo "Volume $vol: migrating failed brick"
+    echo "    from $src_brick"
+    echo "    to   $dst_brick"
+    echo "Press enter to continue"
+    read
+    [ $? -ne 0 ] && exit
+
+    gluster volume replace-brick $vol "$src_brick" "$dst_brick" commit force
+    heal $vol
+}
+
+
+
+#
+# heal $1
+
+
+if [ -n "$1" -a "$1" != "all" ]; then
+    eval "$action" "$@"
+else
+    [ "$1" == "all" ] && shift
+
+    vols=$(gluster volume info | grep -P '^Volume Name' | awk '{ print $NF }' | tr '\r\n' ' ')
+    for vol in $vols; do
+        [[ "$vol" =~ [0-9] ]] && continue
+        [[ "$vol" =~ ^vol_ ]] && continue
+        [[ "$vol" =~ ^heketi ]] && continue
+
+        eval "$action" "$vol" "$@"
+    done
+fi
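The script above runs the gluster CLI inside one of the storage pods through the
oc-rsh wrapper defined in scripts/opts.sh below. Typical invocations might look
like this (volume and host names are only examples):

    ./gluster.sh info all
    ./gluster.sh heal openshift
    ./gluster.sh migrate datastore ipekatrin1.ipe.kit.edu ipekatrin2.ipe.kit.edu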
diff --git a/scripts/opts.sh b/scripts/opts.sh
new file mode 100644
index 0000000..d484efc
--- /dev/null
+++ b/scripts/opts.sh
@@ -0,0 +1,9 @@
+function get_gluster_pod {
+    oc -n glusterfs get pods -l 'glusterfs=storage-pod' | grep Running | awk '{ print $1 }' | head -n 1
+}
+
+gpod=$(get_gluster_pod)
+
+function gluster {
+    oc -n glusterfs rsh po/$gpod gluster "$@"
+}
diff --git a/setup.sh b/setup.sh
index 83b8a52..1c38536 100755
--- a/setup.sh
+++ b/setup.sh
@@ -2,88 +2,106 @@
 
 . opts.sh
 
+action=$1
+shift
 
-case "${1}" in
+case "$action" in
    all)
-	./setup.sh -i $inventory prepare || exit 1
-	./setup.sh -i $inventory openshift || exit 1
-	./setup.sh -i $inventory gluster || exit 1
-	./setup.sh -i $inventory configure || exit 1
-	./setup.sh -i $inventory projects || exit 1
+	./setup.sh -i $inventory prepare "$@" || exit 1
+	./setup.sh -i $inventory openshift "$@" || exit 1
+	./setup.sh -i $inventory gluster "$@" || exit 1
+	./setup.sh -i $inventory configure "$@" || exit 1
+	./setup.sh -i $inventory projects "$@" || exit 1
 	;;
    local)
-	apply playbooks/local.yml || exit 1
+	apply playbooks/local.yml "$@" || exit 1
 	;;
    vm)
-	apply playbooks/ands-vm-setup.yml || exit 1
-	;;
-   vmconf)
-	apply playbooks/ands-vm-conf.yml || exit 1
+	apply playbooks/ands-vm-setup.yml "$@" || exit 1
 	;;
    prepare)
-	apply playbooks/ands-prepare.yml || exit 1
+	apply playbooks/ands-prepare.yml "$@" || exit 1
 	;;
   openshift)
-	apply playbooks/openshift-install.yml || exit 1
+	apply playbooks/openshift-install.yml "$@" || exit 1
 	;;
   gluster)
-	apply playbooks/ands-gluster.yml || exit 1
+	apply playbooks/ands-gluster.yml "$@" || exit 1
+	;;
+   migrate)
+	from=$1
+	to=$2
+	[ -z "$from" -o -z "$to" ] && { usage "you must specify source and destination ip addresses" ; exit 1 ; }
+	shift 2
+	apply playbooks/ands-gluster-migrate.yml --extra-vars "gfs_from=$from gfs_to=$to" "$@" || exit 1
+#	echo "Currently unsupported, use scripts..."
+	;;
+   ganesha)
+	apply playbooks/ands-gluster-ganesha.yml "$@" || exit 1
 	;;
    configure)
-	apply playbooks/openshift-setup.yml || exit 1
+	apply playbooks/openshift-setup.yml "$@" || exit 1
 	;;
    projects)
-	apply playbooks/openshift-setup-projects.yml || exit 1
+	apply playbooks/openshift-setup-projects.yml "$@" || exit 1
 	;;
    project)
-	project=$2
+	project=$1
 	shift
-	[ -z "$project" ] && { echo 'project name should be specified...' ; exit 1; }
-	apply playbooks/openshift-setup-project.yml --extra-vars "ands_configure_project=$project" || exit 1
+	[ -n "$project" ] || { usage 'project name should be specified...' ; exit 1; }
+	apply playbooks/openshift-setup-project.yml --extra-vars "ands_configure_project=$project" "$@" || exit 1
+	;;
+   openshift-masters)
+	apply playbooks/openshift-add-masters.yml "$@" || exit 1
 	;;
    openshift-nodes)
-	apply playbooks/openshift-add-nodes.yml || exit 1
+	apply playbooks/openshift-add-nodes.yml "$@" || exit 1
 	;;
-   nodes)
-	./setup.sh -i $inventory prepare || exit 1
-	./setup.sh -i $inventory openshift-nodes || exit 1
-	./setup.sh -i $inventory gluster || exit 1
-	./setup.sh -i $inventory configure || exit 1
+   openshift-etcd)
+	apply playbooks/openshift-add-etcd.yml "$@" || exit 1
 	;;
-   openshift-masters)
-	apply playbooks/openshift-add-masters.yml || exit 1
+   openshift-gluster)
+	apply playbooks/openshift-add-gluster.yml "$@" || exit 1
 	;;
    masters)
-	./setup.sh -i $inventory prepare || exit 1
-	./setup.sh -i $inventory openshift-masters || exit 1
-	./setup.sh -i $inventory gluster || exit 1
-	./setup.sh -i $inventory configure || exit 1
+	./setup.sh -i $inventory prepare "$@" || exit 1
+	./setup.sh -i $inventory openshift-masters "$@" || exit 1
+	./setup.sh -i $inventory gluster "$@" || exit 1
+	./setup.sh -i $inventory configure "$@" || exit 1
+	;;
+   nodes)
+	./setup.sh -i $inventory prepare "$@" || exit 1
+	./setup.sh -i $inventory openshift-nodes "$@" || exit 1
+	./setup.sh -i $inventory gluster "$@" || exit 1
+	./setup.sh -i $inventory configure "$@" || exit 1
 	;;
    users)
-	apply playbooks/openshift-setup-users.yml || exit 1
+	apply playbooks/openshift-setup-users.yml "$@" || exit 1
 	;;
    security)
-	apply playbooks/openshift-setup-security.yml || exit 1
+	apply playbooks/openshift-setup-security.yml "$@" || exit 1
 	;;
    storage)
-	apply playbooks/openshift-setup-storage.yml || exit 1
+	apply playbooks/openshift-setup-storage.yml "$@" || exit 1
 	;;
    vpn)
-	apply playbooks/openshift-setup-vpn.yml || exit 1
+	apply playbooks/openshift-setup-vpn.yml "$@" || exit 1
 	;;
    certs)
-	apply playbooks/openshift-redeploy-certificates.yml --extra-vars "openshift_certificates_redeploy_ca=true" || exit 1
+	apply playbooks/openshift-redeploy-certificates.yml --extra-vars "openshift_certificates_redeploy_ca=true" "$@" || exit 1
 	;;
    upgrade)
-	apply playbooks/openshift-upgrade.yml || exit 1
+	apply playbooks/openshift-upgrade.yml "$@" || exit 1
 	;;
-   check)
-	apply playbooks/maintain.yml || exit
+   maintain)
+	apply playbooks/maintain.yml "$@" || exit
 	;;
    setup)
-	[ -n "$2" ] || usage "Specify that to setup"
-	apply ands_openshift -e "subrole=$2"
+	subrole=$1
+	shift
+	[ -n "$subrole" ] || { usage "Specify what to setup"; exit 1; }
+	apply ands_openshift -e "subrole=$subrole" "$@"
 	;;
    *)
-	apply $@ || exit 1
+	apply $action "$@" || exit 1
 esac
diff --git a/setup/configs/labels.yml b/setup/configs/labels.yml
index 1c5f19f..e8ee868 100644
--- a/setup/configs/labels.yml
+++ b/setup/configs/labels.yml
@@ -2,5 +2,12 @@
 ands_openshift_labels:
   region: "infra"
   zone: "default"
-  master: "{{ ( 'masters' in group_names ) | ternary(1, 0) }}"
+  hostid: "{{ ands_host_id }}"
+  hostname: "{{ ansible_hostname }}"
+  fqdn: "{{ ansible_hostname }}.{{ ansible_domain }}"
+  master: "{{ (('masters' in group_names) or ( 'new_masters' in group_names )) | ternary(1, 0) }}"
   fat_storage: "{{ ( 'storage_nodes' in group_names ) | ternary(1, 0) }}"
+  fat_memory: 0
+  pod_node: 1
+  compute_node: 0
+  gpu_node: 0
diff --git a/setup/configs/volumes.yml b/setup/configs/volumes.yml
index f400eed..f97d485 100644
--- a/setup/configs/volumes.yml
+++ b/setup/configs/volumes.yml
@@ -12,18 +12,22 @@ ands_heketi_domain:
   volumes:
     heketidbstorage: { type: "cfg" }
 
+ands_nfs_clients:
+  rw: [ "{{ ands_openshift_public_network }}", "141.52.64.104" ]
+  ro: [ "141.52.64.0/23" ]
+
 ands_storage_domains:
   - servers: "ands_storage_servers"
-    clients: "masters"
+    clients: [ "masters", "new_masters" ]
     volumes:
       provision:   { type: "cfg",  mount: "{{ ands_paths.provision }}" }
-      openshift:   { type: "cfg",  mount: "{{ ands_paths.openshift }}" }
+      openshift:   { type: "cfg",  mount: "{{ ands_paths.openshift }}", nfs_clients: "{{ ands_nfs_clients }}" }
       databases:   { type: "db",   mount: "{{ ands_paths.databases }}" }
-      temporary:   { type: "tmp",  mount: "{{ ands_paths.temporary }}" }
-      datastore:   { type: "data", mount: "{{ ands_paths.datastore }}" }
-      katrin_data: { type: "data", mount: "{{ ands_paths.katrin_data }}" }
+      temporary:   { type: "tmp",  mount: "{{ ands_paths.temporary }}", nfs_clients: "{{ ands_nfs_clients }}" }
+      datastore:   { type: "data", mount: "{{ ands_paths.datastore }}", nfs_clients: "{{ ands_nfs_clients }}" }
+      katrin_data: { type: "data", mount: "{{ ands_paths.katrin_data }}", nfs_clients: "{{ ands_nfs_clients }}" }
 
 #  - servers: "storage_nodes"
-#    clients: "nodes"
+#    clients: [ "nodes" ]
+#    volumes:
 #      openshift: { type: "cfg", mount: "{{ ands_paths.openshift }}" }
 #      temporary: { type: "tmp", mount: "{{ ands_paths.temporary }}" }
-#    volumes:
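Exporting a further volume over NFS should then only require the same flag on its
entry; a hypothetical example (name and path are made up):

    my_data: { type: "data", mount: "{{ ands_paths.datastore }}/my_data", nfs_clients: "{{ ands_nfs_clients }}" }

-- cgit v1.2.1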