author     Suren A. Chilingaryan <csa@suren.me>  2018-03-07 07:03:57 +0100
committer  Suren A. Chilingaryan <csa@suren.me>  2018-03-07 07:03:57 +0100
commit     6bc3a3ac71e11fb6459df715536fec373c123a97 (patch)
tree       c99a4507012fd853ffa2622e35fa26f3bd3804e3 /roles
parent     69adb23c59e991ddcabf5cfce415fd8b638dbc1a (diff)
Streamlined networking, OpenShift recovery, Ganesha
Diffstat (limited to 'roles')
-rw-r--r--  roles/ands_facts/defaults/main.yml | 20
-rw-r--r--  roles/ands_facts/tasks/find_interface_by_ip.yml | 20
-rw-r--r--  roles/ands_facts/tasks/main.yml | 65
-rw-r--r--  roles/ands_facts/tasks/network.yml | 49
-rw-r--r--  roles/ands_facts/tasks/storage.yml | 59
-rw-r--r--  roles/ands_network/README | 8
-rw-r--r--  roles/ands_network/tasks/common.yml | 49
-rw-r--r--  roles/ands_network/tasks/install_post.yml | 9
-rw-r--r--  roles/ands_network/tasks/install_pre.yml | 15
-rw-r--r--  roles/ands_network/tasks/main.yml | 3
-rw-r--r--  roles/ands_network/tasks/maintain.yml | 9
-rw-r--r--  roles/ands_openshift/defaults/main.yml | 2
-rw-r--r--  roles/ands_openshift/tasks/hostnames.yml | 15
-rw-r--r--  roles/ands_storage/tasks/detect_device.yml | 5
-rw-r--r--  roles/ands_storage/tasks/main.yml | 8
-rw-r--r--  roles/ands_vagrant_vm/templates/Vagrantfile.j2 | 7
-rw-r--r--  roles/ands_vagrant_vmconf/tasks/main.yml | 4
-rw-r--r--  roles/common/default/main.yml | 1
-rw-r--r--  roles/common/tasks/main.yml | 7
-rw-r--r--  roles/docker/tasks/main.yml | 18
-rw-r--r--  roles/docker/tasks/storage.yml | 2
-rw-r--r--  roles/ganesha/files/ganesha.conf | 1
-rw-r--r--  roles/ganesha/tasks/main.yml | 30
-rw-r--r--  roles/ganesha/templates/ganesha-ha.conf.j2 | 12
-rw-r--r--  roles/glusterfs/defaults/main.yml | 2
-rw-r--r--  roles/glusterfs/tasks/common.yml | 1
-rw-r--r--  roles/glusterfs/tasks/create_domain.yml | 8
-rw-r--r--  roles/glusterfs/tasks/ganesha.yml | 6
-rw-r--r--  roles/glusterfs/tasks/install.yml | 34
-rw-r--r--  roles/glusterfs/tasks/iterate_domains.yml | 7
-rw-r--r--  roles/glusterfs/tasks/iterate_volumes.yml | 12
-rw-r--r--  roles/glusterfs/tasks/main.yml | 35
-rw-r--r--  roles/glusterfs/tasks/migrate.yml | 34
-rw-r--r--  roles/glusterfs/tasks/migrate_failed_brick.yml | 10
-rw-r--r--  roles/glusterfs/tasks/migrate_inform.yml | 1
-rw-r--r--  roles/glusterfs/tasks/migrate_volume.yml | 17
-rw-r--r--  roles/glusterfs/tasks/volumes.yml | 2
-rw-r--r--  roles/glusterfs/templates/export.openshift.conf.j2 | 44
-rw-r--r--  roles/keepalived/defaults/main.yml | 7
-rw-r--r--  roles/keepalived/tasks/main.yml | 1
-rw-r--r--  roles/keepalived/templates/keepalived.conf.j2 | 15
41 files changed, 523 insertions, 131 deletions
diff --git a/roles/ands_facts/defaults/main.yml b/roles/ands_facts/defaults/main.yml
index ac61876..fc3fcfd 100644
--- a/roles/ands_facts/defaults/main.yml
+++ b/roles/ands_facts/defaults/main.yml
@@ -1,3 +1,5 @@
+ands_none: "{{ None }}"
+
ands_configure_heketi: false
ands_data_device_default_threshold: 10
@@ -9,3 +11,21 @@ ands_data_lv: "ands_data"
ands_data_vg: "{{ ( ansible_lvm['lvs'][ands_data_lv] | default(ands_empty_lv) )['vg'] }}"
ands_heketi_lv: "ands_heketi"
ands_heketi_vg: "{{ ( ansible_lvm['lvs'][ands_heketi_lv] | default(ands_empty_lv) )['vg'] }}"
+
+ands_openshift_set_hostname: false
+ands_openshift_set_public_hostname: "{{ (ands_openshift_set_hostname and (ands_openshift_public_network is defined)) | ternary(true, false) }}"
+ands_resolve_public_ip: false
+
+ands_cluster_domain: "{{ ansible_domain }}"
+ands_inner_domain: "{{ ands_cluster_domain }}"
+
+ands_default_ip: "{{ ansible_default_ipv4.address }}"
+ands_openshift_default_ip: "{{ ands_resolve_public_ip | default(false) | ternary(ands_default_ip, ands_none) }}"
+ands_openshift_default_hostname: "{{ (ands_hostname_template is defined) | ternary(ands_hostname_template ~ ands_host_id, ansible_hostname) }}"
+
+ands_inner_lb: false
+ands_inner_lb_id: 254
+ands_inner_lb_hostname: 'ands-lb'
+
+#ands_openshift_inner_interface:
+#ands_openshift_public_interface:
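
To illustrate the hostname default above, a minimal sketch assuming the hypothetical values ands_hostname_template: "ands" and ands_host_id: 3 (the default then renders "ands3"; without a template it falls back to ansible_hostname):

    - name: Show how ands_openshift_default_hostname resolves (sketch)
      debug:
        msg: "{{ (ands_hostname_template is defined) | ternary(ands_hostname_template ~ ands_host_id, ansible_hostname) }}"
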
diff --git a/roles/ands_facts/tasks/find_interface_by_ip.yml b/roles/ands_facts/tasks/find_interface_by_ip.yml
new file mode 100644
index 0000000..ecfa3c3
--- /dev/null
+++ b/roles/ands_facts/tasks/find_interface_by_ip.yml
@@ -0,0 +1,20 @@
+- name: "Looking for interface holding {{ ip }}"
+ set_fact:
+ "{{ var }}": "{{ eth['device'] }}"
+ vars:
+ eth: "{{ hostvars[inventory_hostname]['ansible_' + item] | default({}) }}"
+ ipv4: "{{ eth['ipv4'] | default({}) }}"
+ q: "{{ eth | json_query('ipv4_secondaries[*].address') }}"
+ sec: "{{ ((q == ands_none) or (q == '')) | ternary([], q) }}"
+ ips: "{{ sec | union([ipv4.address]) }}"
+ when:
+ - eth['type'] is defined
+ - eth['ipv4'] is defined
+ - eth['device'] is defined
+ - eth['type'] == 'ether'
+ - ip in ips
+ with_items:
+ - "{{ hostvars[inventory_hostname]['ansible_interfaces'] }}"
+# loop_control:
+# label: "{{ item }}"
+# no_log: true
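
The helper is included with 'var' naming the fact to set and 'ip' giving the address to look up, exactly as network.yml does below; a minimal usage sketch:

    - name: "Detect inner network interface"
      include_tasks: "find_interface_by_ip.yml"
      vars:
        var: "ands_openshift_inner_interface"  # fact that receives the device name
        ip: "{{ ands_openshift_ip }}"          # address whose owning interface is wanted
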
diff --git a/roles/ands_facts/tasks/main.yml b/roles/ands_facts/tasks/main.yml
index cf995a0..6b28683 100644
--- a/roles/ands_facts/tasks/main.yml
+++ b/roles/ands_facts/tasks/main.yml
@@ -1,59 +1,14 @@
-- include_vars: dir="vars"
+---
-- name: Detect Heketi
- set_fact: ands_storage_domains="{{ ands_storage_domains | union([ands_heketi_domain]) }}"
- when:
- - ands_configure_heketi
- - ands_heketi_domain is defined
- - ansible_lvm.lvs[ands_heketi_lv] is defined
+# The variables accessed through 'hostvars' should be set as facts
+# Here we set 'ands_storage_servers' and other variables
+- name: "Configuring storage facts"
+ include_tasks: "storage.yml"
-- name: Set some facts
- set_fact:
- ands_storage_servers: "{{ ands_storage_servers }}"
-
-- name: Set some facts
- set_fact:
- ands_data_vg: "{{ ands_data_vg }}"
- when: ands_data_vg != ""
-
-- name: Set some facts
- set_fact:
- ands_data_lv: "{{ ands_data_lv }}"
- when: ands_data_lv != ""
-
-- name: Set some facts
- set_fact:
- ands_heketi_vg: "{{ ands_heketi_vg }}"
- when: ands_heketi_vg != ""
-
-- name: Set some facts
- set_fact:
- ands_heketi_lv: "{{ ands_heketi_lv }}"
- when: ands_heketi_lv != ""
-
-- name: Set some facts
- set_fact:
- ands_data_dev: "/dev/mapper/{{ands_data_vg}}-{{ands_data_lv}}"
- when:
- - ands_data_vg != ""
- - ands_data_lv != ""
-
-- name: set some facts
- set_fact:
- ands_heketi_dev: "/dev/mapper/{{ands_heketi_vg}}-{{ands_heketi_lv}}"
- when:
- - ands_heketi_vg != ""
- - ands_heketi_lv != ""
+# Here we set 'openshift_hostname', 'openshift_ip' and other variables
+- name: "Configuring network facts"
+ include_tasks: "network.yml"
-- name: set some facts
+- name: "Confirm that ands facts are configured"
set_fact:
- glusterfs_devices: [ "{{ ands_heketi_dev }}" ]
- when:
- - ands_heketi_vg != ""
- - ands_heketi_lv != ""
-
-- include_tasks: detect_data_path.yml
- when: not ands_data_path is defined
-
-#- command: yum-complete-transaction --cleanup-only
-
+ ands_facts_configured: true
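
Because the role persists its results with set_fact, other plays can later read them through hostvars once ands_facts has run on all nodes; a hedged consumer sketch using fact names from this role:

    - name: Read another host's ands_facts results (sketch)
      debug:
        msg: "{{ hostvars[item]['ands_openshift_ip'] | default('ands_facts not configured') }}"
      with_inventory_hostnames:
        - nodes
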
diff --git a/roles/ands_facts/tasks/network.yml b/roles/ands_facts/tasks/network.yml
new file mode 100644
index 0000000..1d0248f
--- /dev/null
+++ b/roles/ands_facts/tasks/network.yml
@@ -0,0 +1,49 @@
+- name: Set network facts
+ set_fact:
+ ands_cluster_domain: "{{ ands_cluster_domain }}"
+ ands_cluster_dot_domain: ".{{ ands_cluster_domain }}"
+ ands_inner_domain: "{{ ands_inner_domain }}"
+ ands_inner_dot_domain: "{{ (ands_inner_domain == ands_none) | ternary('', '.' ~ ands_inner_domain) }}"
+ ands_inner_lb_ip: "{{ ands_openshift_network | ipaddr(ands_inner_lb_id) | ipaddr('address') }}"
+ ands_inner_lb_hostname: "{{ ands_inner_lb_hostname }}"
+ ands_openshift_ip: "{{ ands_openshift_network | ipaddr(ands_host_id) | ipaddr('address') }}"
+ ands_openshift_hostname: "{{ ands_openshift_hostname | default(ands_openshift_set_hostname | ternary(ands_openshift_default_hostname, ands_none)) }}"
+ ands_openshift_public_ip: "{{ (ands_openshift_public_network is defined) | ternary( ands_openshift_public_network | ipaddr(ands_host_id) | ipaddr('address'), ands_openshift_default_ip) }}"
+ ands_openshift_public_hostname: "{{ ands_openshift_public_hostname | default(ands_openshift_set_public_hostname | ternary(ands_openshift_default_hostname, ands_none)) }}"
+ ands_storage_ip: "{{ ands_storage_network | default(ands_openshift_network) | ipaddr(ands_host_id) | ipaddr('address') }}"
+ ands_hostname_storage: "ands_storage{{ ands_host_id }}"
+ ands_hostname_openshift: "ands_openshift{{ ands_host_id }}"
+
+- name: Set more network facts
+ set_fact:
+ ands_openshift_public_fqdn: "{{ (ands_openshift_public_hostname == ands_none) | ternary(ands_none, ands_openshift_public_hostname ~ ands_cluster_dot_domain ) }}"
+ ands_openshift_fqdn: "{{ (ands_openshift_hostname == ands_none) | ternary(ands_none, ands_openshift_hostname ~ ands_inner_dot_domain ) }}"
+ ands_openshift_cluster_fqdn: "{{ ands_inner_lb | ternary(ands_inner_lb_hostname ~ ands_inner_dot_domain, ands_openshift_lb) }}"
+
+- name: "Detect inner network interface"
+ include_tasks: "find_interface_by_ip.yml"
+ vars:
+ var: "ands_openshift_inner_interface"
+ ip: "{{ ands_openshift_ip }}"
+ when:
+ - ands_openshift_inner_interface is not defined
+
+- name: "Detect public network interface"
+ include_tasks: "find_interface_by_ip.yml"
+ vars:
+ var: "ands_openshift_public_interface"
+ ip: "{{ (ands_openshift_public_ip == ands_none) | ternary(ands_default_ip, ands_openshift_public_ip) }}"
+ when:
+ - ands_openshift_public_interface is not defined
+
+- name: Set ipfailover interface
+ set_fact:
+ ands_ipfailover_interface: "{{ ands_openshift_public_interface }}"
+ when: ands_ipfailover_interface is not defined
+
+- name: Set ipfailover inner interface
+ set_fact:
+ ands_ipfailover_inner_interface: "{{ ands_openshift_inner_interface }}"
+ when: ands_ipfailover_inner_interface is not defined
+
+#- debug: msg="{{ hostvars }}"
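
The ipaddr(N) filter used above selects the N-th host address of a network, which is how ands_openshift_ip and ands_inner_lb_ip are derived from the CIDR; a sketch with a made-up network:

    - name: ipaddr arithmetic (sketch, network is hypothetical)
      debug:
        msg: "{{ '192.168.26.0/24' | ipaddr(254) | ipaddr('address') }}"  # -> 192.168.26.254
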
diff --git a/roles/ands_facts/tasks/storage.yml b/roles/ands_facts/tasks/storage.yml
new file mode 100644
index 0000000..cf995a0
--- /dev/null
+++ b/roles/ands_facts/tasks/storage.yml
@@ -0,0 +1,59 @@
+- include_vars: dir="vars"
+
+- name: Detect Heketi
+ set_fact: ands_storage_domains="{{ ands_storage_domains | union([ands_heketi_domain]) }}"
+ when:
+ - ands_configure_heketi
+ - ands_heketi_domain is defined
+ - ansible_lvm.lvs[ands_heketi_lv] is defined
+
+- name: Set some facts
+ set_fact:
+ ands_storage_servers: "{{ ands_storage_servers }}"
+
+- name: Set some facts
+ set_fact:
+ ands_data_vg: "{{ ands_data_vg }}"
+ when: ands_data_vg != ""
+
+- name: Set some facts
+ set_fact:
+ ands_data_lv: "{{ ands_data_lv }}"
+ when: ands_data_lv != ""
+
+- name: Set some facts
+ set_fact:
+ ands_heketi_vg: "{{ ands_heketi_vg }}"
+ when: ands_heketi_vg != ""
+
+- name: Set some facts
+ set_fact:
+ ands_heketi_lv: "{{ ands_heketi_lv }}"
+ when: ands_heketi_lv != ""
+
+- name: Set some facts
+ set_fact:
+ ands_data_dev: "/dev/mapper/{{ands_data_vg}}-{{ands_data_lv}}"
+ when:
+ - ands_data_vg != ""
+ - ands_data_lv != ""
+
+- name: set some facts
+ set_fact:
+ ands_heketi_dev: "/dev/mapper/{{ands_heketi_vg}}-{{ands_heketi_lv}}"
+ when:
+ - ands_heketi_vg != ""
+ - ands_heketi_lv != ""
+
+- name: set some facts
+ set_fact:
+ glusterfs_devices: [ "{{ ands_heketi_dev }}" ]
+ when:
+ - ands_heketi_vg != ""
+ - ands_heketi_lv != ""
+
+- include_tasks: detect_data_path.yml
+ when: not ands_data_path is defined
+
+#- command: yum-complete-transaction --cleanup-only
+
diff --git a/roles/ands_network/README b/roles/ands_network/README
new file mode 100644
index 0000000..dfd029a
--- /dev/null
+++ b/roles/ands_network/README
@@ -0,0 +1,8 @@
+ We need to stop keepalived before provisioning OpenShift and scaling the cluster. This, however, will
+ prevent nodes from communicating with masters. Therefore, we add the IP of the first configured master node to
+ /etc/hosts.
+
+ We do the same if, for some reason, routing is currently off but we still want to provision OpenShift
+ projects. Of course, we don't need to turn off keepalived in this case; we just temporarily add the IP of the
+ first master.
+
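
A sketch of the temporary entry the README describes, written as a lineinfile task (IP and hostname are hypothetical; install_pre.yml below implements the real logic):

    - name: Temporarily point the cluster hostname at the first master (sketch)
      lineinfile:
        dest: "/etc/hosts"
        line: "192.168.26.1 openshift.example.org"
        regexp: ".*openshift\\.example\\.org$"
        state: "present"
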
diff --git a/roles/ands_network/tasks/common.yml b/roles/ands_network/tasks/common.yml
new file mode 100644
index 0000000..384029f
--- /dev/null
+++ b/roles/ands_network/tasks/common.yml
@@ -0,0 +1,49 @@
+#- name: Remove obsolete hostnames from /etc/hosts
+# lineinfile: dest="/etc/hosts" regexp="{{ hostvars[item]['openshift_hostname'] }}" state="absent"
+# when:
+# - hostvars[item]['openshift_hostname'] | default(ands_none) != ands_none
+# - hostvars[item]['ands_facts_configured'] is defined
+# with_inventory_hostnames:
+# - nodes
+# - new_nodes
+
+
+# This will not work properly unless 'ands_facts' has been executed on all nodes. This is checked by evaluating whether 'ands_openshift_fqdn' is defined
+- name: Configure all cluster hostnames in /etc/hosts
+ lineinfile: dest="/etc/hosts" line="{{ ip }} {{ fqdn }} {{ hostname }}" regexp="{{ fqdn }}" state="present"
+ when:
+ - hostvars[item]['ands_openshift_fqdn'] | default(ands_none) != ands_none
+ - hostvars[item]['ands_facts_configured'] is defined
+ vars:
+ ip: "{{ hostvars[item]['ands_openshift_ip'] }}"
+ fqdn: "{{ hostvars[item]['ands_openshift_fqdn'] }}"
+ hostname: "{{ fqdn.split('.')[0] }}"
+ with_inventory_hostnames:
+ - nodes
+ - new_nodes
+
+- name: Configure all storage ips in /etc/hosts
+ lineinfile: dest="/etc/hosts" line="{{ ip }} {{ hostname }}" regexp="{{ hostname }}" state="present"
+ when:
+ - hostvars[item]['ands_storage_network'] | default(ands_none) != ands_none
+ - hostvars[item]['ands_facts_configured'] is defined
+ vars:
+ ip: "{{ hostvars[item]['ands_storage_ip'] }}"
+ hostname: "{{ hostvars[item]['ands_hostname_storage'] }}"
+ with_inventory_hostnames:
+ - storage_nodes
+ - new_storage_nodes
+
+
+- name: Provision /etc/hosts so that all master servers access the Master API via the loopback device
+ lineinfile: dest="/etc/hosts" line="127.0.0.1 {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="present"
+ when: ('masters' in group_names or 'new_masters' in group_names)
+ register: result
+
+- name: Provision /etc/hosts so that hosts without a local master reach the Master API via the inner load balancer
+ lineinfile: dest="/etc/hosts" line="{{ ands_inner_lb_ip }} {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="present"
+ when: (result | skipped) and (ands_inner_lb | default(false))
+
+- name: Register openshift_dns_ip in /etc/hosts
+ lineinfile: dest="/etc/hosts" line="{{ openshift_dns_ip }} openshift_dns_ip" regexp="openshift_dns_ip$" state="present"
+
diff --git a/roles/ands_network/tasks/install_post.yml b/roles/ands_network/tasks/install_post.yml
new file mode 100644
index 0000000..0bfef34
--- /dev/null
+++ b/roles/ands_network/tasks/install_post.yml
@@ -0,0 +1,9 @@
+- name: Start keepalived
+ service: name=keepalived state=started enabled=yes
+ when: ('masters' in group_names) or ('new_masters' in group_names)
+
+- name: Provision /etc/hosts to ensure that all hosts access the master servers appropriately
+ lineinfile: dest="/etc/hosts" line="{{ ands_inner_lb_ip | default('') }} {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="{{ state }}"
+ when: ('masters' not in group_names and 'new_masters' not in group_names)
+ vars:
+ state: "{{ ands_inner_lb | default(false) | ternary('present', 'absent') }}"
diff --git a/roles/ands_network/tasks/install_pre.yml b/roles/ands_network/tasks/install_pre.yml
new file mode 100644
index 0000000..f555d1b
--- /dev/null
+++ b/roles/ands_network/tasks/install_pre.yml
@@ -0,0 +1,15 @@
+- name: Temporarily provision /etc/hosts with the first master's IP.
+ lineinfile: dest="/etc/hosts" line="{{ ands_openshift_network | ipaddr(node_id) | ipaddr('address') }} {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="present"
+ when: ('masters' not in group_names)
+ vars:
+ node_id: "{{ hostvars[groups['masters'][0]]['ands_host_id'] }}"
+
+- name: Check if keepalived is installed
+ stat: path="/etc/sysconfig/keepalived"
+ register: keepalived_result
+
+- name: Stop keepalived
+ service: name=keepalived state=stopped
+ when:
+ - keepalived_result.stat.exists
+ - ('masters' in group_names) or ('new_masters' in group_names)
diff --git a/roles/ands_network/tasks/main.yml b/roles/ands_network/tasks/main.yml
new file mode 100644
index 0000000..0bc913a
--- /dev/null
+++ b/roles/ands_network/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: "Configuring network"
+ include_tasks: "{{ action | default('common') }}.yml"
diff --git a/roles/ands_network/tasks/maintain.yml b/roles/ands_network/tasks/maintain.yml
new file mode 100644
index 0000000..a7af597
--- /dev/null
+++ b/roles/ands_network/tasks/maintain.yml
@@ -0,0 +1,9 @@
+- name: Ensure keepalived is running on master nodes
+ service: name=keepalived state=started enabled=yes
+ when: ('masters' in group_names)
+
+- name: Provision /etc/hosts to ensure that all hosts access the master servers appropriately
+ lineinfile: dest="/etc/hosts" line="{{ ands_inner_lb_ip | default('') }} {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="{{ state }}"
+ when: ('masters' not in group_names and 'new_masters' not in group_names)
+ vars:
+ state: "{{ ands_inner_lb | default(false) | ternary('present', 'absent') }}"
diff --git a/roles/ands_openshift/defaults/main.yml b/roles/ands_openshift/defaults/main.yml
index b97b584..d279345 100644
--- a/roles/ands_openshift/defaults/main.yml
+++ b/roles/ands_openshift/defaults/main.yml
@@ -1,4 +1,4 @@
-openshift_common_subroles: "{{ [ 'hostnames', 'users', 'security', 'storage' ] }}"
+openshift_common_subroles: "{{ [ 'users', 'security', 'storage' ] }}"
openshift_heketi_subroles: "{{ [ 'ssh', 'heketi' ] }}"
openshift_all_subroles: "{{ ands_configure_heketi | default(False) | ternary(openshift_common_subroles + openshift_heketi_subroles, openshift_common_subroles) }}"
diff --git a/roles/ands_openshift/tasks/hostnames.yml b/roles/ands_openshift/tasks/hostnames.yml
deleted file mode 100644
index e489a8c..0000000
--- a/roles/ands_openshift/tasks/hostnames.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-#- name: Remove obsolte hostnames from /etc/hosts
-# lineinfile: dest="/etc/hosts" regexp="{{ hostvars[item]['openshift_hostname'] }}" state="absent"
-# with_inventory_hostnames:
-# - nodes
-
-
-- name: Configure all cluster hostnames in /etc/hosts
- lineinfile: dest="/etc/hosts" line="{{ hostvars[item]['openshift_ip'] }} {{ hostvars[item]['openshift_public_hostname'] }} {{ hostvars[item]['openshift_hostname'] }}" regexp="{{ hostvars[item]['openshift_hostname'] }}" state="present"
- with_inventory_hostnames:
- - nodes
-
-- name: Provision /etc/hosts to ensure that all masters servers are accessing Master API on loopback device
- lineinfile: dest="/etc/hosts" line="127.0.0.1 {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="present"
- when: "'masters' in group_names"
diff --git a/roles/ands_storage/tasks/detect_device.yml b/roles/ands_storage/tasks/detect_device.yml
index 0fb9764..3467371 100644
--- a/roles/ands_storage/tasks/detect_device.yml
+++ b/roles/ands_storage/tasks/detect_device.yml
@@ -1,3 +1,8 @@
+#- name: find if ands data is already mounted
+# set_fact: ands_data_is_mounted=true
+# with_items: "{{ ansible_mounts }}"
+# when: item.mount == ands_data_path
+
- name: find large block devices
set_fact: ands_data_device="/dev/{{ item.key }}"
# debug: msg="{{ item.key }} - {{ (item.value.sectors | int) * (item.value.sectorsize | int) / 1024 / 1024 / 1024 }} GB"
diff --git a/roles/ands_storage/tasks/main.yml b/roles/ands_storage/tasks/main.yml
index 9318f88..43d4692 100644
--- a/roles/ands_storage/tasks/main.yml
+++ b/roles/ands_storage/tasks/main.yml
@@ -10,11 +10,15 @@
- name: Create Ands VG
lvg: vg="{{ ands_data_vg }}" pvs="{{ ands_data_device }}"
- when: ands_data_device is defined
+ when:
+ - ands_data_device is defined
+ - ansible_lvm.vgs[ands_data_vg] is not defined
- name: Create Heketi VG
lvg: vg="{{ ands_heketi_vg }}" pvs="{{ ands_heketi_device }}"
- when: ands_heketi_device is defined
+ when:
+ - ands_heketi_device is defined
+ - ansible_lvm.vgs[ands_heketi_vg] is not defined
- name: Check if Heketi Volume already exists
stat: path="/dev/{{ ands_heketi_vg }}/{{ ands_heketi_lv }}"
diff --git a/roles/ands_vagrant_vm/templates/Vagrantfile.j2 b/roles/ands_vagrant_vm/templates/Vagrantfile.j2
index b044e2e..386ba85 100644
--- a/roles/ands_vagrant_vm/templates/Vagrantfile.j2
+++ b/roles/ands_vagrant_vm/templates/Vagrantfile.j2
@@ -12,7 +12,7 @@ Vagrant.configure("2") do |config|
(1..{{ vagrant_hosts }}).each do |i|
config.vm.define "{{ vagrant_hostname_template }}#{i}" do |node|
node.vm.network "public_network", nm_controlled: "yes", bridge: "br0", mac: "080027{{ macid }}02#{i}", ip: "{{ public_net }}.#{i}", type: "dhcp"
- node.vm.network "private_network", nm_controlled: "yes", mac: "080027{{ macid }}12#{i}", ip: "{{ storage_net }}.#{i}", name: "vboxnet0", type: "static"
+ node.vm.network "private_network", nm_controlled: "yes", mac: "080027{{ macid }}12#{i}", ip: "{{ net }}.#{i}", name: "vboxnet0", type: "static"
node.vm.box = "centos/7"
node.disksize.size = "80 GB"
node.vm.hostname = "{{ vagrant_hostname_template }}#{i}.ipe.kit.edu"
@@ -26,8 +26,9 @@ Vagrant.configure("2") do |config|
node.vm.provision "shell", run: "always", inline: "( ip addr show dev eth2 | grep {{ netid }}.#{i} ) || ip addr add 192.168.{{ netid }}.#{i}/24 dev eth2"
node.vm.provision "shell", run: "always", inline: "chmod +r /etc/sysconfig/network-scripts/ifcfg-eth*"
node.vm.provision "shell", run: "always", inline: "chcon --reference /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth*"
-
- node.vm.provision "shell", run: "always", inline: "ip route del default dev eth0"
+# node.vm.provision "shell", run: "always", inline: "nmcli con down 'System eth0'; nmcli con up 'System eth0'"
+ node.vm.provision "shell", run: "always", inline: "ip route del default dev eth0 &> /dev/null ; error=$?"
+ node.vm.provision "shell", run: "always", inline: "DEVICE_IFACE=eth1 /etc/NetworkManager/dispatcher.d/99-origin-dns.sh eth1 up &> /dev/null; error=$?"
node.vm.provision "shell" do |s|
ssh_pub_key = File.readlines("authorized_keys").first.strip
diff --git a/roles/ands_vagrant_vmconf/tasks/main.yml b/roles/ands_vagrant_vmconf/tasks/main.yml
index f52a52d..b130aa4 100644
--- a/roles/ands_vagrant_vmconf/tasks/main.yml
+++ b/roles/ands_vagrant_vmconf/tasks/main.yml
@@ -23,6 +23,4 @@
# We just need networkmanager running
# - name: Bypass absent NM
# copy: remote_src="yes" src="/etc/resolv.conf" dest="/etc/origin/node/resolv.conf"
-
- - name: Update CentOS
- yum: name=* state=latest update_cache=yes
+
diff --git a/roles/common/default/main.yml b/roles/common/default/main.yml
new file mode 100644
index 0000000..d355d15
--- /dev/null
+++ b/roles/common/default/main.yml
@@ -0,0 +1 @@
+os_update: "{{ ands_update | default(false) }}"
\ No newline at end of file
diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml
index 9bd820a..fdd7246 100644
--- a/roles/common/tasks/main.yml
+++ b/roles/common/tasks/main.yml
@@ -14,6 +14,7 @@
# Seems we need iptables-services at least temporary...
- name: Ensure all required packages are installed
package: name={{item}} state=present
+ register: result
with_items:
- mc
- bzr
@@ -28,6 +29,12 @@
- PyYAML
- python-rhsm-certificates
- glusterfs-fuse
+ - telnet
+
+# We always update on first install and if requested
+- name: Update CentOS
+ yum: name=* state=latest update_cache=yes
+ when: (result | changed) or (os_update | default(false))
#- name: Add NodeJS required by a few used Ansible extensions
# package: name={{item}} state=present
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index a7bd700..0d040a9 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -27,14 +27,16 @@
lvol: vg="{{ ansible_lvm['lvs'][docker_lv]['vg'] }}" lv="docker_lv" size="{{ docker_volume_size }}"
when: docker_volume_size is defined
-- name: Limit size of container log files
- ghetto_json:
- path: "/etc/docker/daemon.json"
- log-driver: "json-file"
- log-opts.max-size: "{{ docker_max_log_size }}"
- log-opts.max-file: "{{ docker_max_log_files }}"
- notify:
- - restart docker
+# By default, the systemd driver is installed. It is removed during the OpenShift installation, but is still there during the prepare stage.
+# The docker parameters can be set through OpenShift and are currently configured there.
+#- name: Limit size of container log files
+# ghetto_json:
+# path: "/etc/docker/daemon.json"
+# log-driver: "json-file"
+# log-opts.max-size: "{{ docker_max_log_size }}"
+# log-opts.max-file: "{{ docker_max_log_files }}"
+# notify:
+# - restart docker
- name: start docker
service: name="docker" enabled=yes state=started
diff --git a/roles/docker/tasks/storage.yml b/roles/docker/tasks/storage.yml
index 595979c..5a5c858 100644
--- a/roles/docker/tasks/storage.yml
+++ b/roles/docker/tasks/storage.yml
@@ -20,7 +20,7 @@
- set_fact: docker_storage_config="VG={{ docker_storage_vg }} AUTO_EXTEND_POOL=true"
- set_fact: docker_storage_config="{{ docker_storage_config }} DEVS={{ docker_storage_device }}"
- when: ( docker_storage_device is defined ) and ( not ansible_lvm.vgs.{{ docker_storage_vg }} is defined )
+ when: ( docker_storage_device is defined ) and ( ansible_lvm.vgs[docker_storage_vg] is not defined )
- name: stop docker
service: name="docker" state="stopped"
diff --git a/roles/ganesha/files/ganesha.conf b/roles/ganesha/files/ganesha.conf
new file mode 100644
index 0000000..2bfc114
--- /dev/null
+++ b/roles/ganesha/files/ganesha.conf
@@ -0,0 +1 @@
+%include "/etc/ganesha/exports/export.openshift.conf"
diff --git a/roles/ganesha/tasks/main.yml b/roles/ganesha/tasks/main.yml
new file mode 100644
index 0000000..032631b
--- /dev/null
+++ b/roles/ganesha/tasks/main.yml
@@ -0,0 +1,30 @@
+- name: Ensure GlusterFS repositories are present
+ yum: name="centos-release-gluster{{ glusterfs_version }}" state=present
+
+- name: Ensure Ganesha is installed
+ yum: name={{item}} state=present
+ with_items:
+ - nfs-ganesha-gluster
+ - nfs-ganesha
+
+- name: Change logdir group to prevent selinux problems
+ file: dest="/var/log/ganesha" owner="ganesha" group="root" mode="0775" state="directory"
+
+- name: Copy default Ganesha configuration
+ copy: src="ganesha.conf" dest="/etc/ganesha/ganesha.conf" owner="root" group="root" mode="0644"
+
+- name: Configure Ganesha HA
+ template: src="ganesha-ha.conf.j2" dest="/etc/ganesha/ganesha-ha.conf" owner=root group=root mode="0644"
+
+- name: Configure firewalld
+ firewalld: service="{{ item }}" state="enabled" permanent="true" immediate="true"
+ with_items:
+ - nfs
+# - mountd
+# - rpc-bind
+
+- name: Reload firewalld rules
+ shell: firewall-cmd --reload
+
+- name: Enable and start ganesha service
+ service: name="nfs-ganesha" state=started enabled=yes
diff --git a/roles/ganesha/templates/ganesha-ha.conf.j2 b/roles/ganesha/templates/ganesha-ha.conf.j2
new file mode 100644
index 0000000..bdb2e0c
--- /dev/null
+++ b/roles/ganesha/templates/ganesha-ha.conf.j2
@@ -0,0 +1,12 @@
+{% set members = groups['masters'] | union(groups['new_masters'] | default([])) | map('extract', hostvars, 'ands_hostname_storage') | list %}
+{% set vips = ands_ipfailover_vips | default([]) %}
+{% set n_vips = vips | length %}
+{% if n_vips > 0 %}
+{% set nodes = members[0:n_vips] %}
+HA_NAME="openshift_nfs"
+#HA_VOL_SERVER="{{ hostvars[groups['masters'][0]]['ands_hostname_storage'] }}"
+HA_CLUSTER_NODES="{{ nodes | join(',') }}"
+{% for node in nodes %}
+VIP_{{ node }}="{{ vips[loop.index - 1] }}"
+{% endfor %}
+{% endif %}
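
Assuming, for example, two masters whose ands_hostname_storage facts are ands_storage1 and ands_storage2, plus two hypothetical VIPs, the template renders roughly:

    HA_NAME="openshift_nfs"
    HA_CLUSTER_NODES="ands_storage1,ands_storage2"
    VIP_ands_storage1="192.168.26.40"
    VIP_ands_storage2="192.168.26.41"
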
diff --git a/roles/glusterfs/defaults/main.yml b/roles/glusterfs/defaults/main.yml
index 9587a9b..700838d 100644
--- a/roles/glusterfs/defaults/main.yml
+++ b/roles/glusterfs/defaults/main.yml
@@ -1,5 +1,5 @@
---
-glusterfs_version: 39
+glusterfs_version: 312
glusterfs_transport: rdma
glusterfs_network: "{{ ands_storage_network }}"
diff --git a/roles/glusterfs/tasks/common.yml b/roles/glusterfs/tasks/common.yml
index 5e8e3b6..67fb815 100644
--- a/roles/glusterfs/tasks/common.yml
+++ b/roles/glusterfs/tasks/common.yml
@@ -8,6 +8,7 @@
- glusterfs-cli
- glusterfs-fuse
- glusterfs-rdma
+ - heketi-client
- libsemanage-python
- name: Allow fuse in SELinux configuration
diff --git a/roles/glusterfs/tasks/create_domain.yml b/roles/glusterfs/tasks/create_domain.yml
index 8f8042b..76623f2 100644
--- a/roles/glusterfs/tasks/create_domain.yml
+++ b/roles/glusterfs/tasks/create_domain.yml
@@ -1,8 +1,16 @@
---
+- name: Get list of existing gluster volumes
+ shell: "gluster volume info"
+ changed_when: false
+ register: gv_results
+
+
- name: Configure volumes
include_tasks: create_volume.yml
with_dict: "{{ domain.volumes }}"
+ when: volume_string not in gv_results.stdout_lines
vars:
+ volume_string: "Volume Name: {{ volume.key }}"
domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}"
loop_control:
loop_var: volume
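
The idempotency check above matches against the header lines of 'gluster volume info'; its output begins roughly as follows for each volume (name hypothetical), so "Volume Name: {{ volume.key }}" appears in stdout_lines exactly when the volume already exists:

    Volume Name: openshift
    Type: Replicate
    Status: Started
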
diff --git a/roles/glusterfs/tasks/ganesha.yml b/roles/glusterfs/tasks/ganesha.yml
new file mode 100644
index 0000000..61d151a
--- /dev/null
+++ b/roles/glusterfs/tasks/ganesha.yml
@@ -0,0 +1,6 @@
+- name: Create /etc/ganesha/exports
+ file: dest="/etc/ganesha/exports" owner="root" group="root" mode="0755" state="directory"
+
+- name: Configure Ganesha NFS exports
+ template: src="export.openshift.conf.j2" dest="/etc/ganesha/exports/export.openshift.conf" owner=root group=root mode="0644"
+
diff --git a/roles/glusterfs/tasks/install.yml b/roles/glusterfs/tasks/install.yml
new file mode 100644
index 0000000..d7ee766
--- /dev/null
+++ b/roles/glusterfs/tasks/install.yml
@@ -0,0 +1,34 @@
+---
+- name: Install GlusterFS Common Software
+ include_tasks: common.yml
+ when:
+ - "'software' in glusterfs_subroles"
+
+- name: Install GlusterFS client
+ include_tasks: setup-client.yml
+ when:
+ - "'software' in glusterfs_subroles"
+ - "'ands_storage_servers' not in group_names"
+
+- name: Install GlusterFS OpenShift Server
+ include_tasks: setup-openshift-server.yml
+ when:
+ - "'software' in glusterfs_subroles"
+ - "'ands_storage_servers' in group_names"
+ - "'glusterfs' in group_names"
+
+- name: Install GlusterFS External Server
+ include_tasks: setup-external-server.yml
+ when:
+ - "'software' in glusterfs_subroles"
+ - "'ands_storage_servers' in group_names"
+ - "'glusterfs' not in group_names"
+
+- name: Configure gluster peers (on first host)
+ shell: gluster peer probe {{item}}
+ run_once: true
+ with_items: "{{ glusterfs_servers }}"
+
+- include_tasks: volumes.yml
+ when:
+ - "'volumes' in glusterfs_subroles"
diff --git a/roles/glusterfs/tasks/iterate_domains.yml b/roles/glusterfs/tasks/iterate_domains.yml
new file mode 100644
index 0000000..e061652
--- /dev/null
+++ b/roles/glusterfs/tasks/iterate_domains.yml
@@ -0,0 +1,7 @@
+- name: Process all storage domains
+ include_tasks: "iterate_volumes.yml"
+ run_once: true
+ delegate_to: "{{ groups[domain.servers][0] }}"
+ with_items: "{{ glusterfs_domains }}"
+ loop_control:
+ loop_var: domain
diff --git a/roles/glusterfs/tasks/iterate_volumes.yml b/roles/glusterfs/tasks/iterate_volumes.yml
new file mode 100644
index 0000000..8f61116
--- /dev/null
+++ b/roles/glusterfs/tasks/iterate_volumes.yml
@@ -0,0 +1,12 @@
+---
+- name: Iterate volumes
+ include_tasks: "{{ action }}.yml"
+ with_dict: "{{ domain.volumes }}"
+ vars:
+ name: "{{ volume.key }}"
+ path: "{{ volume.value.mount }}"
+ server_group: "{{ domain.servers }}"
+ domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}"
+ when: volume.value.mount is defined
+ loop_control:
+ loop_var: volume
diff --git a/roles/glusterfs/tasks/main.yml b/roles/glusterfs/tasks/main.yml
index d7ee766..a02c1a1 100644
--- a/roles/glusterfs/tasks/main.yml
+++ b/roles/glusterfs/tasks/main.yml
@@ -1,34 +1,3 @@
---
-- name: Install GlusterFS Common Software
- include_tasks: common.yml
- when:
- - "'software' in glusterfs_subroles"
-
-- name: Install GlusterFS client
- include_tasks: setup-client.yml
- when:
- - "'software' in glusterfs_subroles"
- - "'ands_storage_servers' not in group_names"
-
-- name: Install GlusterFS OpenShift Server
- include_tasks: setup-openshift-server.yml
- when:
- - "'software' in glusterfs_subroles"
- - "'ands_storage_servers' in group_names"
- - "'glusterfs' in group_names"
-
-- name: Install GlusterFS External Server
- include_tasks: setup-external-server.yml
- when:
- - "'software' in glusterfs_subroles"
- - "'ands_storage_servers' in group_names"
- - "'glusterfs' not in group_names"
-
-- name: Configure gluster peers (on first host)
- shell: gluster peer probe {{item}}
- run_once: true
- with_items: "{{ glusterfs_servers }}"
-
-- include_tasks: volumes.yml
- when:
- - "'volumes' in glusterfs_subroles"
+- name: "Configuring Gluster storage subsystem"
+ include_tasks: "{{ action | default('install') }}.yml"
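
With this dispatch, the role's entry point is selected per play through the 'action' variable; a hedged sketch of driving the migrate path (host names hypothetical):

    - hosts: ands_storage_servers
      roles:
        - role: glusterfs
          action: migrate
          glusterfs_migrate_from: "ands_storage1"
          glusterfs_migrate_to: "ands_storage2"
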
diff --git a/roles/glusterfs/tasks/migrate.yml b/roles/glusterfs/tasks/migrate.yml
new file mode 100644
index 0000000..3ef7917
--- /dev/null
+++ b/roles/glusterfs/tasks/migrate.yml
@@ -0,0 +1,34 @@
+- debug: msg="Migrating"
+
+#- name: Detect if source host {{ glusterfs_migrate_from }} is failed or running
+# wait_for: host={{ glusterfs_migrate_from }} port=24007 timeout=1
+# register: srcres
+# changed_when: srcres | failed
+# failed_when: false
+
+#- set_fact:
+# glusterfs_migrate_failed: "{{ srcres | changed }}"
+
+- set_fact:
+ glusterfs_migrate_failed: "1"
+
+- name: Analyze current configuration
+ include_tasks: iterate_domains.yml
+ vars:
+ action: "migrate_volume"
+ migrate_action: "migrate_inform"
+ migrate_from: "{{ glusterfs_migrate_from }}"
+ migrate_to: "{{ glusterfs_migrate_to }}"
+ migrate_failed: "{{ glusterfs_migrate_failed }}"
+
+- name: Exterminate mankind
+ pause: prompt='Please confirm if you want to proceed. Press Ctrl+c and then "a" to abort'
+
+- name: Apply the configured migration
+ include_tasks: iterate_domains.yml
+ vars:
+ action: "migrate_volume"
+ migrate_action: "{{ migrate_failed | ternary('migrate_failed_brick', 'migrate_live_brick') }}"
+ migrate_from: "{{ glusterfs_migrate_from }}"
+ migrate_to: "{{ glusterfs_migrate_to }}"
+ migrate_failed: "{{ glusterfs_migrate_failed }}"
diff --git a/roles/glusterfs/tasks/migrate_failed_brick.yml b/roles/glusterfs/tasks/migrate_failed_brick.yml
new file mode 100644
index 0000000..3490c82
--- /dev/null
+++ b/roles/glusterfs/tasks/migrate_failed_brick.yml
@@ -0,0 +1,10 @@
+- name: "Volume {{ vol }} - Migrating {{ src }} to {{ dst }}"
+ command: "gluster volume replace-brick {{ vol }} {{ src }} {{ dst }} commit force"
+
+# This only works within containers via 'oc rsh'. But there are auto-healing daemons running anyway,
+# so we are probably fine with just migrating.
+#- name: "Healing {{ vol }}"
+# command: "gluster volume heal {{ vol }} full"
+
+#- name: "Rebalancing {{ vol }}"
+# command: "gluster volume rebalance {{ vol }} fix-layout start"
diff --git a/roles/glusterfs/tasks/migrate_inform.yml b/roles/glusterfs/tasks/migrate_inform.yml
new file mode 100644
index 0000000..912d359
--- /dev/null
+++ b/roles/glusterfs/tasks/migrate_inform.yml
@@ -0,0 +1 @@
+- warn: msg="Volume {{ vol }} - Migrating {{ src }} to {{ dst }}"
diff --git a/roles/glusterfs/tasks/migrate_volume.yml b/roles/glusterfs/tasks/migrate_volume.yml
new file mode 100644
index 0000000..f9edeac
--- /dev/null
+++ b/roles/glusterfs/tasks/migrate_volume.yml
@@ -0,0 +1,17 @@
+- name: "Analyze bricks of {{ volume.key }}"
+ shell: |
+ gluster volume info '{{ volume.key }}' | grep -P 'Brick\d+:' | awk '{ print $2 }'
+ register: gvires
+
+- name: "Execute configured {{ migrate_action }} on volume {{ volume.key }} with bricks {{ src }} and {{ dst }}"
+ include_tasks: "{{ migrate_action }}.yml"
+ vars:
+ bricks: "{{ gvires.stdout_lines | list }}"
+ servers: "{{ bricks | map('regex_replace', ':.*$', '') | list }}"
+ brick: "{{ servers.index(migrate_from) }}"
+ src: "{{ bricks[brick | int] | default('') }}"
+ dst: "{{ bricks[brick | int] | default('') | regex_replace('^' ~ migrate_from, migrate_to) }}"
+ vol: "{{ volume.key }}"
+ when:
+ - migrate_from in servers
+ - brick is defined
diff --git a/roles/glusterfs/tasks/volumes.yml b/roles/glusterfs/tasks/volumes.yml
index c4d49ac..1a85378 100644
--- a/roles/glusterfs/tasks/volumes.yml
+++ b/roles/glusterfs/tasks/volumes.yml
@@ -8,7 +8,7 @@
- name: Mount volume domains
include_tasks: mount_domain.yml
- when: ( domain.clients | default("---") ) in group_names
+ when: domain.clients | default([]) | intersect(group_names) | length > 0
with_items: "{{ glusterfs_domains }}"
loop_control:
loop_var: domain
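
The old condition only matched when domain.clients was a single group name; the intersect form accepts a list and mounts the domain when any of its client groups applies to the host. A sketch with hypothetical values:

    # domain.clients = ['masters', 'nodes'], host's group_names = ['nodes', 'storage_nodes']
    - debug:
        msg: "{{ ['masters', 'nodes'] | intersect(['nodes', 'storage_nodes']) | length > 0 }}"  # -> True, so the domain is mounted
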
diff --git a/roles/glusterfs/templates/export.openshift.conf.j2 b/roles/glusterfs/templates/export.openshift.conf.j2
new file mode 100644
index 0000000..b2c547f
--- /dev/null
+++ b/roles/glusterfs/templates/export.openshift.conf.j2
@@ -0,0 +1,44 @@
+{% set ns = namespace(i=0) %}
+{% for domain in glusterfs_domains %}
+{% for name, vol in domain.volumes.iteritems() %}
+{% if vol.nfs_clients is defined %}
+{% set nfs = vol.nfs_clients %}
+{% set ns.i = ns.i + 1 %}
+EXPORT {
+ Export_Id = {{ ns.i }};
+ Path = "/{{ name }}";
+ FSAL {
+ name = GLUSTER;
+ hostname = "localhost";
+ volume = "{{ name }}";
+ }
+ Access_type = RW;
+ Disable_ACL = true;
+ Squash = "No_root_squash";
+ Pseudo = "/{{ name }}";
+ Protocols = "3", "4" ;
+ Transports = "UDP","TCP";
+ SecType = "sys";
+
+{% if nfs.rw is defined %}
+{% for net in nfs.rw %}
+ CLIENT {
+ clients = {{ net }};
+ Access_type = RW;
+ }
+{% endfor %}
+{% endif %}
+
+{% if nfs.ro is defined %}
+{% for net in nfs.ro %}
+ CLIENT {
+ clients = {{ net }};
+ Access_type = RO;
+ }
+{% endfor %}
+{% endif %}
+}
+
+{% endif %}
+{% endfor %}
+{% endfor %}
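
Note that the counter relies on a Jinja2 namespace object (Jinja2 >= 2.10): a plain {% set i = i + 1 %} inside the loop would be scoped to the iteration and every export would receive Id 1. For a single volume named, hypothetically, "openshift" with one rw network, the rendered file starts roughly:

    EXPORT {
        Export_Id = 1;
        Path = "/openshift";
        FSAL {
            name = GLUSTER;
            hostname = "localhost";
            volume = "openshift";
        }
        ...
        CLIENT {
            clients = 192.168.26.0/24;
            Access_type = RW;
        }
    }
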
diff --git a/roles/keepalived/defaults/main.yml b/roles/keepalived/defaults/main.yml
index a7087b0..3302b85 100644
--- a/roles/keepalived/defaults/main.yml
+++ b/roles/keepalived/defaults/main.yml
@@ -1,6 +1,9 @@
---
-keepalived_vips: "{{ ands_ipfailover_vips | default([]) }}"
-keepalived_iface: "{{ ands_ipfailover_interface | default('eth0') }}"
+keepalived_vrrp:
+ - vips: "{{ ands_ipfailover_vips | default([]) }}"
+ iface: "{{ ands_ipfailover_interface | default('eth0') }}"
+ - vips: "{{ ands_inner_lb | ternary([ands_inner_lb_ip], []) }}"
+ iface: "{{ ands_ipfailover_inner_interface }}"
keepalived_master_prio: 80
keepalived_backup_prio: 20
diff --git a/roles/keepalived/tasks/main.yml b/roles/keepalived/tasks/main.yml
index adedcdc..08835b8 100644
--- a/roles/keepalived/tasks/main.yml
+++ b/roles/keepalived/tasks/main.yml
@@ -20,3 +20,4 @@
- name: Start keepalived
service: name=keepalived state=started enabled=yes
tags: keepalived
+
diff --git a/roles/keepalived/templates/keepalived.conf.j2 b/roles/keepalived/templates/keepalived.conf.j2
index 8d9a580..6df5eab 100644
--- a/roles/keepalived/templates/keepalived.conf.j2
+++ b/roles/keepalived/templates/keepalived.conf.j2
@@ -6,20 +6,22 @@ vrrp_script track {
interval {{ keepalived_check_interval }}
}
-{% for vips in keepalived_vips %}
-{% set id = ( vips | ipaddr('address') | regex_replace('^.*\.', '') ) %}
+{% for vrrp in keepalived_vrrp %}
+{% set vrrp_id = loop.index %}
+{% for vips in vrrp.vips %}
+{% set id = ( vips | ipaddr('address') | regex_replace('^.*\.', '') ) %}
-vrrp_instance VI_{{ loop.index }} {
+vrrp_instance VI_{{vrrp_id}}_{{ loop.index }} {
- virtual_router_id {{ id }}
+ virtual_router_id {{ (vrrp_id - 1) * 32 + (loop.index) }}
state {{ (( ( loop.index - 1) % (keepalived_num_nodes | int) ) == (keepalived_node_id | int) ) | ternary('MASTER', 'BACKUP') }}
priority {{ (( ( loop.index - 1) % (keepalived_num_nodes | int) ) == (keepalived_node_id | int) ) | ternary(keepalived_master_prio, keepalived_backup_prio) }}
- interface {{ keepalived_iface }}
+ interface {{ vrrp.iface }}
virtual_ipaddress {
- {{ vips }} dev {{ keepalived_iface }}
+ {{ vips }} dev {{ vrrp.iface }}
}
advert_int 1
@@ -33,4 +35,5 @@ vrrp_instance VI_{{ loop.index }} {
track
}
}
+{% endfor %}
{% endfor %}
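
The reworked numbering keeps virtual_router_id unique across VRRP groups: instances of the first group get IDs 1..n, while the second (inner load-balancer) group starts at 33. A rendered sketch for the first VIP of the second group (interface name hypothetical):

    vrrp_instance VI_2_1 {
        virtual_router_id 33
        interface eth1
        ...
    }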