-rw-r--r-- playbooks/cluster-operator/aws/components.yml | 24
-rw-r--r-- playbooks/openstack/advanced-configuration.md | 32
-rw-r--r-- playbooks/openstack/sample-inventory/group_vars/all.yml | 7
-rw-r--r-- roles/container_runtime/defaults/main.yml | 1
-rw-r--r-- roles/etcd/defaults/main.yaml | 2
-rw-r--r-- roles/kuryr/tasks/master.yaml | 4
-rw-r--r-- roles/lib_utils/filter_plugins/oo_filters.py | 48
-rw-r--r-- roles/openshift_aws/templates/user_data.j2 | 3
-rw-r--r-- roles/openshift_facts/defaults/main.yml | 3
-rw-r--r-- roles/openshift_logging_curator/tasks/main.yaml | 7
-rw-r--r-- roles/openshift_logging_elasticsearch/tasks/main.yaml | 327
-rw-r--r-- roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml | 7
-rw-r--r-- roles/openshift_logging_kibana/tasks/main.yaml | 7
-rw-r--r-- roles/openshift_logging_mux/tasks/main.yaml | 55
-rw-r--r-- roles/openshift_metrics/tasks/install_cassandra.yaml | 7
-rw-r--r-- roles/openshift_metrics/tasks/install_hawkular.yaml | 7
-rw-r--r-- roles/openshift_metrics/tasks/install_heapster.yaml | 19
-rw-r--r-- roles/openshift_metrics/tasks/install_hosa.yaml | 13
-rw-r--r-- roles/openshift_node/files/bootstrap.yml | 8
-rwxr-xr-x roles/openshift_node/files/networkmanager/99-origin-dns.sh | 5
-rw-r--r-- roles/openshift_openstack/defaults/main.yml | 6
-rw-r--r-- roles/openshift_openstack/templates/heat_stack.yaml.j2 | 22
-rw-r--r-- roles/openshift_openstack/templates/user_data.j2 | 16
-rw-r--r-- roles/openshift_prometheus/tasks/install_prometheus.yaml | 61
-rw-r--r-- roles/openshift_provisioners/tasks/install_provisioners.yaml | 15
-rw-r--r-- roles/openshift_service_catalog/tasks/install.yml | 57
-rw-r--r-- roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml | 12
-rw-r--r-- roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-pvcs-template.yml | 67
-rw-r--r-- roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-template.yml | 140
-rw-r--r-- roles/openshift_storage_glusterfs/files/v3.6/glusterblock-provisioner.yml | 104
-rw-r--r-- roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml | 20
-rw-r--r-- roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml | 16
-rw-r--r-- roles/openshift_storage_glusterfs/templates/v3.6/gluster-block-storageclass.yml.j2 | 19
-rw-r--r-- roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 4
-rw-r--r-- roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2 | 8
-rw-r--r-- roles/openshift_web_console/tasks/install.yml | 138
-rw-r--r-- roles/template_service_broker/tasks/install.yml | 21
37 files changed, 932 insertions(+), 380 deletions(-)
diff --git a/playbooks/cluster-operator/aws/components.yml b/playbooks/cluster-operator/aws/components.yml
new file mode 100644
index 000000000..8587aac45
--- /dev/null
+++ b/playbooks/cluster-operator/aws/components.yml
@@ -0,0 +1,24 @@
+---
+- name: Alert user to variables needed
+ hosts: localhost
+ tasks:
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+- name: Set up the master node group
+ hosts: localhost
+ tasks:
+ - import_role:
+ name: openshift_aws
+ tasks_from: setup_master_group.yml
+
+- name: Run the init
+ import_playbook: ../../init/main.yml
+
+- name: Include the components playbook to finish the hosted configuration
+ import_playbook: ../../common/private/components.yml
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index e8f4cfc32..8df3c40b0 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -273,6 +273,38 @@ openshift_openstack_cluster_node_labels:
mylabel: myvalue
```
+`openshift_openstack_provision_user_commands` allows users to execute
+shell commands via cloud-init on all of the Nova servers created in
+the Heat stack, before they become available for SSH connections.
+Note that you should use custom Ansible playbooks whenever
+possible, like this `provision_install_custom.yml` example playbook:
+```
+- import_playbook: openshift-ansible/playbooks/openstack/openshift-cluster/provision.yml
+
+- name: My custom actions
+ hosts: cluster_hosts
+ tasks:
+ - do whatever you want here
+
+- import_playbook: openshift-ansible/playbooks/openstack/openshift-cluster/install.yml
+```
+The playbook leverages two existing provider interfaces: `provision.yml` and
+`install.yml`. For some cases, though, like SSH key configuration and coordinated
+reboots of servers, the cloud-init runcmd directive may be a better choice. User-specified
+shell commands for cloud-init need to be either strings or lists, for example:
+```
+- openshift_openstack_provision_user_commands:
+ - set -vx
+ - systemctl stop sshd # fences off ansible playbooks as we want to reboot later
+ - ['echo', 'foo', '>', '/tmp/foo']
+ - [ ls, /tmp/foo, '||', true ]
+ - reboot # unfences ansible playbooks to continue after reboot
+```
+
+**Note** To protect Nova servers from being recreated when the user data changes via
+`openshift_openstack_provision_user_commands`, the
+`user_data_update_policy` parameter is set to `IGNORE` for Heat resources.
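+
+For example, the corresponding property on each Heat server resource looks
+like this (a minimal sketch, assuming an `OS::Nova::Server` resource):
+```
+server:
+  type: OS::Nova::Server
+  properties:
+    user_data_update_policy: IGNORE
+```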
+
The `openshift_openstack_nodes_to_remove` allows you to specify the numerical indexes
of App nodes that should be removed; for example, ['0', '2'],
diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml
index d63229120..101ac52ad 100644
--- a/playbooks/openstack/sample-inventory/group_vars/all.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/all.yml
@@ -85,7 +85,12 @@ openshift_openstack_docker_volume_size: "15"
## WARNING: This will delete any data on the volume!
#openshift_openstack_prepare_and_format_registry_volume: False
-openshift_openstack_subnet_prefix: "192.168.99"
+# The Classless Inter-Domain Routing (CIDR) range for the OpenStack VM subnet.
+openshift_openstack_subnet_cidr: "192.168.99.0/24"
+# The starting IP address for the OpenStack subnet allocation pool.
+openshift_openstack_pool_start: "192.168.99.3"
+# The ending IP address for the OpenStack subnet allocation pool.
+openshift_openstack_pool_end: "192.168.99.254"
## Red Hat subscription:
#rhsub_user: '<username>'
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index 22e16d29d..01540776f 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -101,6 +101,7 @@ l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registrie
# options:
# - rw
# - mode=755
+# type: bind
container_runtime_crio_additional_mounts: []
l_crio_additional_mounts: "{{ ',' + (container_runtime_crio_additional_mounts | lib_utils_oo_l_of_d_to_csv) if container_runtime_crio_additional_mounts != [] else '' }}"
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 87e249642..6f1dc5847 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -78,7 +78,7 @@ etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_p
# required role variable
#etcd_peer: 127.0.0.1
-etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
+etcdctlv2: "{{ r_etcd_common_etcdctl_command }} --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
# Location of the service file is fixed and not meant to be changed
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
index 1cc6d2375..4f9dd82de 100644
--- a/roles/kuryr/tasks/master.yaml
+++ b/roles/kuryr/tasks/master.yaml
@@ -1,6 +1,7 @@
---
- name: Perform OpenShift ServiceAccount config
include_tasks: serviceaccount.yaml
+ run_once: true
- name: Create kuryr manifests tempdir
command: mktemp -d
@@ -32,6 +33,7 @@
namespace: "{{ kuryr_namespace }}"
files:
- "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+ run_once: true
- name: Apply Controller Deployment manifest
oc_obj:
@@ -41,6 +43,7 @@
namespace: "{{ kuryr_namespace }}"
files:
- "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+ run_once: true
- name: Apply kuryr-cni DaemonSet manifest
oc_obj:
@@ -50,3 +53,4 @@
namespace: "{{ kuryr_namespace }}"
files:
- "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
+ run_once: true
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
index c355115b5..ed6bb4c28 100644
--- a/roles/lib_utils/filter_plugins/oo_filters.py
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -660,6 +660,50 @@ def map_from_pairs(source, delim="="):
return dict(item.split(delim) for item in source.split(","))
+def lib_utils_oo_get_node_labels(source, hostvars=None):
+ ''' Return a list of labels assigned to schedulable nodes '''
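+ # For example (hypothetical inventory values): a schedulable host whose
+ # openshift_node_labels is {'region': 'infra'} contributes that dict to
+ # the returned list; hosts with openshift_schedulable: False are skipped.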
+ labels = list()
+
+ # Filter out the unschedulable nodes
+ for host in source:
+ if host not in hostvars:
+ return
+ node_vars = hostvars[host]
+
+ # All nodes are considered schedulable
+ # unless explicitly marked otherwise
+ schedulable = node_vars.get('openshift_schedulable')
+ if schedulable is None:
+ schedulable = True
+ try:
+ if not strtobool(str(schedulable)):
+ # explicitly marked as unschedulable
+ continue
+ except ValueError:
+ # Incorrect value in openshift_schedulable, skip node
+ continue
+
+ # Get a list of labels from the node
+ node_labels = node_vars.get('openshift_node_labels')
+ if node_labels:
+ labels.append(node_labels)
+
+ return labels
+
+
+def lib_utils_oo_has_no_matching_selector(source, selector=None):
+ ''' Return True when selector cannot be placed
+ on nodes with labels from source '''
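+ # For example (hypothetical values): if source contains
+ # {'region': 'infra', 'zone': 'a'} and selector is {'region': 'infra'},
+ # the selector's items are a subset of that node's labels, so a
+ # matching node exists and the filter returns False.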
+ # Empty selector means any node
+ if not selector:
+ return False
+ for item in source:
+ if selector.items() <= item.items():
+ # Matching selector found
+ return False
+ return True
+
+
class FilterModule(object):
""" Custom ansible filter mapping """
@@ -691,5 +735,7 @@ class FilterModule(object):
"lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
"lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
"lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv,
- "map_from_pairs": map_from_pairs
+ "lib_utils_oo_has_no_matching_selector": lib_utils_oo_has_no_matching_selector,
+ "lib_utils_oo_get_node_labels": lib_utils_oo_get_node_labels,
+ "map_from_pairs": map_from_pairs,
}
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index bda1334cd..46e4e1cc5 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -20,6 +20,9 @@ runcmd:
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
{% endif %}
{% if openshift_aws_node_group.group != 'master' %}
+{# Restarting systemd-hostnamed ensures that instances will have FQDN
+hostnames following network restart. #}
+- [ systemctl, restart, systemd-hostnamed]
- [ systemctl, restart, NetworkManager]
- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index a223ffba6..3b381b1e4 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -104,3 +104,6 @@ openshift_service_type_dict:
openshift-enterprise: atomic-openshift
openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
+
+# Create a list of node labels (dict) for schedulable nodes
+openshift_schedulable_node_labels: "{{ groups['oo_nodes_to_config'] | lib_utils_oo_get_node_labels(hostvars) }}"
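+# For example, with two schedulable nodes labeled {'region': 'infra'} and
+# {'region': 'primary'} (illustrative values), this evaluates to
+# [{'region': 'infra'}, {'region': 'primary'}]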
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 6e8605d28..456a25082 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -14,6 +14,13 @@
- include_tasks: determine_version.yaml
+- name: Ensure that logging curator has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for logging curator - '{{ openshift_logging_curator_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_curator_nodeselector)
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 9db67ea9b..64e5a3a1f 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that Elasticsearch has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Elasticsearch - '{{ openshift_logging_es_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_es_nodeselector)
+
- name: Validate Elasticsearch cluster size
fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
@@ -18,8 +25,8 @@
- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -35,14 +42,14 @@
- set_fact:
full_restart_cluster: True
when:
- - _es_installed_version is defined
- - _es_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+ - _es_installed_version is defined
+ - _es_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
- set_fact:
full_restart_cluster: True
when:
- - _es_ops_installed_version is defined
- - _es_ops_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+ - _es_ops_installed_version is defined
+ - _es_ops_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
# allow passing in a tempdir
- name: Create temp directory for doing work in
@@ -78,7 +85,7 @@
name: "aggregated-logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
when:
- - openshift_logging_image_pull_secret == ''
+ - openshift_logging_image_pull_secret == ''
# rolebinding reader
- name: Create rolebinding-reader role
@@ -86,9 +93,9 @@
state: present
name: rolebinding-reader
rules:
- - apiGroups: [""]
- resources: ["clusterrolebindings"]
- verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["clusterrolebindings"]
+ verbs: ["get"]
# SA roles
- name: Set rolebinding-reader permissions for ES
@@ -128,8 +135,8 @@
- fail:
msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}"
when:
- - "prometheus_out.stderr | length > 0"
- - "'already exists' not in prometheus_out.stderr"
+ - "prometheus_out.stderr | length > 0"
+ - "'already exists' not in prometheus_out.stderr"
- set_fact:
_logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
@@ -151,8 +158,8 @@
roleRef:
name: view
subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
changed_when: no
- name: Set logging-elasticsearch-view-role role
@@ -162,18 +169,18 @@
kind: rolebinding
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
delete_after: true
# configmap
- assert:
that:
- - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
+ - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}"
- assert:
that:
- - "{{ openshift_logging_es_log_appenders | length > 0 }}"
+ - "{{ openshift_logging_es_log_appenders | length > 0 }}"
msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}"
- template:
@@ -189,81 +196,81 @@
# create diff between current configmap files and our current files
- when: not openshift_logging_es5_techpreview
block:
- - template:
- src: "{{ __base_file_dir }}/elasticsearch-logging.yml.j2"
- dest: "{{ tempdir }}/elasticsearch-logging.yml"
- vars:
- root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
- changed_when: no
-
- - include_role:
- name: openshift_logging
- tasks_from: patch_configmap_files.yaml
- vars:
- configmap_name: "logging-elasticsearch"
- configmap_namespace: "logging"
- configmap_file_names:
- - current_file: "elasticsearch.yml"
- new_file: "{{ tempdir }}/elasticsearch.yml"
- protected_lines: ["number_of_shards", "number_of_replicas"]
- - current_file: "logging.yml"
- new_file: "{{ tempdir }}/elasticsearch-logging.yml"
-
- - name: Set ES configmap
- oc_configmap:
- state: present
- name: "{{ elasticsearch_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- from_file:
- elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
- logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
- register: es_config_creation
- notify: "restart elasticsearch"
+ - template:
+ src: "{{ __base_file_dir }}/elasticsearch-logging.yml.j2"
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ vars:
+ root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
+ changed_when: no
+
+ - include_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-elasticsearch"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "elasticsearch.yml"
+ new_file: "{{ tempdir }}/elasticsearch.yml"
+ protected_lines: ["number_of_shards", "number_of_replicas"]
+ - current_file: "logging.yml"
+ new_file: "{{ tempdir }}/elasticsearch-logging.yml"
+
+ - name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+ register: es_config_creation
+ notify: "restart elasticsearch"
- when: openshift_logging_es5_techpreview | bool
block:
- - template:
- src: "{{ __base_file_dir }}/log4j2.properties.j2"
- dest: "{{ tempdir }}/log4j2.properties"
- vars:
- root_logger: "{{ openshift_logging_es_log_appenders | list }}"
- changed_when: no
-
- - include_role:
- name: openshift_logging
- tasks_from: patch_configmap_files.yaml
- vars:
- configmap_name: "logging-elasticsearch"
- configmap_namespace: "logging"
- configmap_file_names:
- - current_file: "elasticsearch.yml"
- new_file: "{{ tempdir }}/elasticsearch.yml"
- - current_file: "log4j2.properties"
- new_file: "{{ tempdir }}/log4j2.properties"
-
- - name: Set ES configmap
- oc_configmap:
- state: present
- name: "{{ elasticsearch_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- from_file:
- elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
- log4j2.properties: "{{ tempdir }}/log4j2.properties"
- register: es_config_creation
- notify: "restart elasticsearch"
+ - template:
+ src: "{{ __base_file_dir }}/log4j2.properties.j2"
+ dest: "{{ tempdir }}/log4j2.properties"
+ vars:
+ root_logger: "{{ openshift_logging_es_log_appenders | list }}"
+ changed_when: no
+
+ - include_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-elasticsearch"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "elasticsearch.yml"
+ new_file: "{{ tempdir }}/elasticsearch.yml"
+ - current_file: "log4j2.properties"
+ new_file: "{{ tempdir }}/log4j2.properties"
+
+ - name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ log4j2.properties: "{{ tempdir }}/log4j2.properties"
+ register: es_config_creation
+ notify: "restart elasticsearch"
- when: es_config_creation.changed | bool
block:
- - set_fact:
- _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
+ - set_fact:
+ _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
- - shell: >
- {{ openshift_client_binary }} get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
- register: _es_dcs
+ - shell: >
+ {{ openshift_client_binary }} get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
+ register: _es_dcs
- - set_fact:
- _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [_es_dcs.stdout] | unique }}"
- when: _es_dcs.stdout != ""
+ - set_fact:
+ _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [_es_dcs.stdout] | unique }}"
+ when: _es_dcs.stdout != ""
# secret
- name: Set ES secret
@@ -272,24 +279,24 @@
name: "logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - name: key
- path: "{{ generated_certs_dir }}/logging-es.jks"
- - name: truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: searchguard.key
- path: "{{ generated_certs_dir }}/elasticsearch.jks"
- - name: searchguard.truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: admin-key
- path: "{{ generated_certs_dir }}/system.admin.key"
- - name: admin-cert
- path: "{{ generated_certs_dir }}/system.admin.crt"
- - name: admin-ca
- path: "{{ generated_certs_dir }}/ca.crt"
- - name: admin.jks
- path: "{{ generated_certs_dir }}/system.admin.jks"
- - name: passwd.yml
- path: "{{mktemp.stdout}}/passwd.yml"
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: passwd.yml
+ path: "{{mktemp.stdout}}/passwd.yml"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -303,7 +310,7 @@
labels:
logging-infra: 'support'
ports:
- - port: 9300
+ - port: 9300
- name: Set logging-{{ es_component }} service
oc_service:
@@ -316,8 +323,8 @@
labels:
logging-infra: 'support'
ports:
- - port: 9200
- targetPort: "restapi"
+ - port: 9200
+ targetPort: "restapi"
- name: Set logging-{{ es_component}}-prometheus service
oc_service:
@@ -327,9 +334,9 @@
labels:
logging-infra: 'support'
ports:
- - name: proxy
- port: 443
- targetPort: 4443
+ - name: proxy
+ port: 443
+ targetPort: 4443
selector:
component: "{{ es_component }}"
provider: openshift
@@ -357,46 +364,46 @@
# so we check for the presence of 'stderr' to determine if the obj exists or not
# the RC for existing and not existing is both 0
- when:
- - logging_elasticsearch_pvc.results.stderr is defined
- - openshift_logging_elasticsearch_storage_type == "pvc"
+ - logging_elasticsearch_pvc.results.stderr is defined
+ - openshift_logging_elasticsearch_storage_type == "pvc"
block:
- # storageclasses are used by default but if static then disable
- # storageclasses with the storageClassName set to "" in pvc.j2
- - name: Creating ES storage template - static
- template:
- src: "{{ __base_file_dir }}/pvc.j2"
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
- when:
- - not openshift_logging_elasticsearch_pvc_dynamic | bool
-
- # Storageclasses are used by default if configured
- - name: Creating ES storage template - dynamic
- template:
- src: "{{ __base_file_dir }}/pvc.j2"
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- when:
- - openshift_logging_elasticsearch_pvc_dynamic | bool
-
- - name: Set ES storage
- oc_obj:
- state: present
- kind: pvc
- name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- files:
- - "{{ tempdir }}/templates/logging-es-pvc.yml"
- delete_after: true
+ # storageclasses are used by default but if static then disable
+ # storageclasses with the storageClassName set to "" in pvc.j2
+ - name: Creating ES storage template - static
+ template:
+ src: "{{ __base_file_dir }}/pvc.j2"
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
+ when:
+ - not openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ # Storageclasses are used by default if configured
+ - name: Creating ES storage template - dynamic
+ template:
+ src: "{{ __base_file_dir }}/pvc.j2"
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ - name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
- set_fact:
es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | lib_utils_oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
@@ -437,7 +444,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: dc
files:
- - "{{ tempdir }}/templates/logging-es-dc.yml"
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
delete_after: true
register: es_dc_creation
notify: "restart elasticsearch"
@@ -452,37 +459,37 @@
src: "{{ generated_certs_dir }}/{{ item.file }}"
register: key_pairs
with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "es_key", file: "system.logging.es.key" }
- - { name: "es_cert", file: "system.logging.es.crt" }
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "es_key", file: "system.logging.es.key" }
+ - { name: "es_cert", file: "system.logging.es.crt" }
when: openshift_logging_es_allow_external | bool
- set_fact:
es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}"
when:
- - openshift_logging_es_key | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_key | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}"
when:
- - openshift_logging_es_cert | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_cert | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}"
when:
- - openshift_logging_es_ca_ext | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_ca_ext | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
when:
- - es_ca is not defined
- - openshift_logging_es_allow_external | bool
+ - es_ca is not defined
+ - openshift_logging_es_allow_external | bool
changed_when: false
- name: Generating Elasticsearch {{ es_component }} route template
@@ -513,7 +520,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: route
files:
- - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
+ - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
when: openshift_logging_es_allow_external | bool
## Placeholder for migration when necessary ##
diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
index fffdd9f8b..2adc51a16 100644
--- a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
@@ -4,6 +4,13 @@
msg: Invalid sink type "{{openshift_logging_eventrouter_sink}}", only one of "{{__eventrouter_sinks}}" allowed
that: openshift_logging_eventrouter_sink in __eventrouter_sinks
+- name: Ensure that logging eventrouter has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for logging EventRouter - '{{ openshift_logging_eventrouter_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_eventrouter_nodeselector)
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 58edc5ce5..7b6bc02e1 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -8,6 +8,13 @@
loop_control:
loop_var: var_file_name
+- name: Ensure that Kibana has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Kibana - '{{ openshift_logging_kibana_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_kibana_nodeselector)
+
- name: Set kibana image facts
set_fact:
openshift_logging_kibana_image_prefix: "{{ openshift_logging_kibana_image_prefix | default(__openshift_logging_kibana_image_prefix) }}"
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index b2699b285..f810f3606 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -7,11 +7,18 @@
msg: Operations logs destination is required
when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == ''
+- name: Ensure that logging mux has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for logging mux - '{{ openshift_logging_mux_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_mux_nodeselector)
+
- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -55,7 +62,7 @@
name: "aggregated-logging-mux"
namespace: "{{ openshift_logging_mux_namespace }}"
when:
- - openshift_logging_image_pull_secret == ''
+ - openshift_logging_image_pull_secret == ''
# set service account scc
- name: Set privileged permissions for Mux
@@ -102,10 +109,10 @@
configmap_name: "logging-mux"
configmap_namespace: "{{ openshift_logging_mux_namespace }}"
configmap_file_names:
- - current_file: "fluent.conf"
- new_file: "{{ tempdir }}/fluent-mux.conf"
- - current_file: "secure-forward.conf"
- new_file: "{{ tempdir }}/secure-forward-mux.conf"
+ - current_file: "fluent.conf"
+ new_file: "{{ tempdir }}/fluent-mux.conf"
+ - current_file: "secure-forward.conf"
+ new_file: "{{ tempdir }}/secure-forward-mux.conf"
- name: Set Mux configmap
oc_configmap:
@@ -123,14 +130,14 @@
name: logging-mux
namespace: "{{ openshift_logging_mux_namespace }}"
files:
- - name: ca
- path: "{{ generated_certs_dir }}/ca.crt"
- - name: key
- path: "{{ generated_certs_dir }}/system.logging.mux.key"
- - name: cert
- path: "{{ generated_certs_dir }}/system.logging.mux.crt"
- - name: shared_key
- path: "{{ generated_certs_dir }}/mux_shared_key"
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.mux.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.mux.crt"
+ - name: shared_key
+ path: "{{ generated_certs_dir }}/mux_shared_key"
# services
- name: Set logging-mux service for external communication
@@ -144,11 +151,11 @@
labels:
logging-infra: 'support'
ports:
- - name: mux-forward
- port: "{{ openshift_logging_mux_port }}"
- targetPort: "mux-forward"
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
external_ips:
- - "{{ openshift_logging_mux_external_address }}"
+ - "{{ openshift_logging_mux_external_address }}"
when: openshift_logging_mux_allow_external | bool
- name: Set logging-mux service for internal communication
@@ -162,9 +169,9 @@
labels:
logging-infra: 'support'
ports:
- - name: mux-forward
- port: "{{ openshift_logging_mux_port }}"
- targetPort: "mux-forward"
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
when: not openshift_logging_mux_allow_external | bool
# create Mux DC
@@ -199,7 +206,7 @@
selector: "{{ openshift_logging_mux_file_buffer_pvc_pv_selector }}"
storage_class_name: "{{ openshift_logging_mux_file_buffer_pvc_storage_class_name | default('', true) }}"
when:
- - openshift_logging_mux_file_buffer_storage_type == "pvc"
+ - openshift_logging_mux_file_buffer_storage_type == "pvc"
- name: Set logging-mux DC
oc_obj:
@@ -208,7 +215,7 @@
namespace: "{{ openshift_logging_mux_namespace }}"
kind: dc
files:
- - "{{ tempdir }}/templates/logging-mux-dc.yaml"
+ - "{{ tempdir }}/templates/logging-mux-dc.yaml"
delete_after: true
- name: Add mux namespaces
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 158e596ec..e0b37ac26 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that Cassandra has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for cassandra - '{{ openshift_metrics_cassandra_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_metrics_cassandra_nodeselector)
+
- shell: >
{{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
index f45e7a042..de4e89a01 100644
--- a/roles/openshift_metrics/tasks/install_hawkular.yaml
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that Hawkular has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for hawkular - '{{ openshift_metrics_hawkular_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_metrics_hawkular_nodeselector)
+
- command: >
{{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index 73e7454f0..e4ddf98ff 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that Heapster has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for heapster - '{{ openshift_metrics_heapster_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_metrics_heapster_nodeselector)
+
- command: >
{{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
@@ -20,8 +27,8 @@
- set_fact:
heapster_sa_secrets: "{{ heapster_sa_secrets + [item] }}"
with_items:
- - hawkular-metrics-certs
- - hawkular-metrics-account
+ - hawkular-metrics-certs
+ - hawkular-metrics-account
when: not openshift_metrics_heapster_standalone | bool
- name: Generating serviceaccount for heapster
@@ -38,7 +45,7 @@
vars:
obj_name: heapster
ports:
- - {port: 80, targetPort: http-endpoint}
+ - {port: 80, targetPort: http-endpoint}
selector:
name: "{{obj_name}}"
annotations:
@@ -61,9 +68,9 @@
kind: ClusterRole
name: cluster-reader
subjects:
- - kind: ServiceAccount
- name: heapster
- namespace: "{{ openshift_metrics_project }}"
+ - kind: ServiceAccount
+ name: heapster
+ namespace: "{{ openshift_metrics_project }}"
changed_when: no
- include_tasks: generate_heapster_secrets.yaml
diff --git a/roles/openshift_metrics/tasks/install_hosa.yaml b/roles/openshift_metrics/tasks/install_hosa.yaml
index 7c9bc26d0..3624cb5ab 100644
--- a/roles/openshift_metrics/tasks/install_hosa.yaml
+++ b/roles/openshift_metrics/tasks/install_hosa.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that Hawkular agent has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Hawkular agent - '{{ openshift_metrics_hawkular_agent_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_metrics_hawkular_agent_nodeselector)
+
- name: Generate Hawkular Agent (HOSA) Cluster Role
template:
src: hawkular_openshift_agent_role.j2
@@ -38,7 +45,7 @@
kind: ClusterRole
name: hawkular-openshift-agent
subjects:
- - kind: ServiceAccount
- name: hawkular-openshift-agent
- namespace: "{{openshift_metrics_hawkular_agent_namespace}}"
+ - kind: ServiceAccount
+ name: hawkular-openshift-agent
+ namespace: "{{openshift_metrics_hawkular_agent_namespace}}"
changed_when: no
diff --git a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml
index a5545c81b..ea280640f 100644
--- a/roles/openshift_node/files/bootstrap.yml
+++ b/roles/openshift_node/files/bootstrap.yml
@@ -61,11 +61,3 @@
with_items:
- line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}"
regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
-
- - name: "Start the {{ openshift_service_type }}-node service"
- systemd:
- daemon_reload: yes
- state: restarted
- enabled: True
- name: "{{ openshift_service_type }}-node"
- no_block: true
diff --git a/roles/openshift_node/files/networkmanager/99-origin-dns.sh b/roles/openshift_node/files/networkmanager/99-origin-dns.sh
index f4e48b5b7..acf3e2f38 100755
--- a/roles/openshift_node/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node/files/networkmanager/99-origin-dns.sh
@@ -116,8 +116,9 @@ EOF
echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
if ! grep -qw search ${NEW_RESOLV_CONF}; then
echo 'search cluster.local' >> ${NEW_RESOLV_CONF}
- elif ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
- sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
+ elif ! grep -q 'search cluster.local' ${NEW_RESOLV_CONF}; then
+ # cluster.local must be among the first three search domains for the glibc resolver to work
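+ # e.g. "search example.com" becomes "search cluster.local example.com"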
+ sed -i -e 's/^search \(.\+\)\( cluster\.local\)\{0,1\}$/search cluster.local \1/' ${NEW_RESOLV_CONF}
fi
cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
fi
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 2bdb81632..75f1300e1 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -55,7 +55,9 @@ openshift_openstack_app_subdomain: "apps"
# heat vars
openshift_openstack_clusterid: openshift
openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
-openshift_openstack_subnet_prefix: "192.168.99"
+openshift_openstack_subnet_cidr: "192.168.99.0/24"
+openshift_openstack_pool_start: "192.168.99.3"
+openshift_openstack_pool_end: "192.168.99.254"
openshift_openstack_master_hostname: master
openshift_openstack_infra_hostname: infra-node
openshift_openstack_cns_hostname: cns
@@ -94,6 +96,8 @@ openshift_openstack_etcd_volume_size: 2
openshift_openstack_lb_volume_size: 5
openshift_openstack_ephemeral_volumes: false
+# User commands for cloud-init executed on all Nova servers provisioned
+openshift_openstack_provision_user_commands: []
# cloud-config
openshift_openstack_disable_root: true
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 1d3173022..b62cb2bc8 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -78,22 +78,10 @@ resources:
params:
cluster_id: {{ openshift_openstack_stack_name }}
network: { get_resource: net }
- cidr:
- str_replace:
- template: subnet_24_prefix.0/24
- params:
- subnet_24_prefix: {{ openshift_openstack_subnet_prefix }}
+ cidr: {{ openshift_openstack_subnet_cidr }}
allocation_pools:
- - start:
- str_replace:
- template: subnet_24_prefix.3
- params:
- subnet_24_prefix: {{ openshift_openstack_subnet_prefix }}
- end:
- str_replace:
- template: subnet_24_prefix.254
- params:
- subnet_24_prefix: {{ openshift_openstack_subnet_prefix }}
+ - start: {{ openshift_openstack_pool_start }}
+ end: {{ openshift_openstack_pool_end }}
dns_nameservers:
{% for nameserver in openshift_openstack_dns_nameservers %}
- {{ nameserver }}
@@ -261,7 +249,7 @@ resources:
protocol: tcp
port_range_min: 30000
port_range_max: 32767
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
+ remote_ip_prefix: "{{ openshift_openstack_subnet_cidr }}"
{% else %}
master-secgrp:
type: OS::Neutron::SecurityGroup
@@ -393,7 +381,7 @@ resources:
protocol: tcp
port_range_min: 30000
port_range_max: 32767
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
+ remote_ip_prefix: "{{ openshift_openstack_subnet_cidr }}"
{% endif %}
infra-secgrp:
diff --git a/roles/openshift_openstack/templates/user_data.j2 b/roles/openshift_openstack/templates/user_data.j2
index ccaa5d464..1ca87a429 100644
--- a/roles/openshift_openstack/templates/user_data.j2
+++ b/roles/openshift_openstack/templates/user_data.j2
@@ -11,3 +11,19 @@ write_files:
permissions: 440
content: |
Defaults:openshift !requiretty
+
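+{# Example (illustrative values): with openshift_openstack_provision_user_commands
+   set to ['set -vx', ['echo', 'foo']], this writes /root/ansible_install.sh
+   containing "set -vx" and "echo foo", then executes it once via runcmd. #}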
+{% if openshift_openstack_provision_user_commands %}
+ - path: /root/ansible_install.sh
+ permissions: '0544'
+ content: |
+{% for cmd in openshift_openstack_provision_user_commands %}
+{% if cmd is string %}
+ {{ cmd }}
+{% elif cmd is iterable %}
+ {{ cmd|join(' ') }}
+{% endif %}
+{% endfor %}
+
+runcmd:
+ - /root/ansible_install.sh
+{% endif %}
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 0b565502f..5a8228bc4 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -2,6 +2,13 @@
# set facts
- include_tasks: facts.yaml
+- name: Ensure that Prometheus has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Prometheus - '{{ openshift_prometheus_node_selector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_prometheus_node_selector)
+
# namespace
- name: Add prometheus project
oc_project:
@@ -17,12 +24,12 @@
name: "{{ item }}-proxy"
namespace: "{{ openshift_prometheus_namespace }}"
contents:
- - path: session_secret
- data: "{{ 43 | lib_utils_oo_random_word }}="
+ - path: session_secret
+ data: "{{ 43 | lib_utils_oo_random_word }}="
with_items:
- - prometheus
- - alerts
- - alertmanager
+ - prometheus
+ - alerts
+ - alertmanager
# serviceaccount
- name: create prometheus serviceaccount
@@ -62,10 +69,10 @@
oprometheus.io/scheme: https
service.alpha.openshift.io/serving-cert-secret-name: prometheus-tls
ports:
- - name: prometheus
- port: "{{ openshift_prometheus_service_port }}"
- targetPort: "{{ openshift_prometheus_service_targetport }}"
- protocol: TCP
+ - name: prometheus
+ port: "{{ openshift_prometheus_service_port }}"
+ targetPort: "{{ openshift_prometheus_service_targetport }}"
+ protocol: TCP
selector:
app: prometheus
@@ -78,10 +85,10 @@
annotations:
service.alpha.openshift.io/serving-cert-secret-name: alerts-tls
ports:
- - name: prometheus
- port: "{{ openshift_prometheus_service_port }}"
- targetPort: "{{ openshift_prometheus_alerts_service_targetport }}"
- protocol: TCP
+ - name: prometheus
+ port: "{{ openshift_prometheus_service_port }}"
+ targetPort: "{{ openshift_prometheus_alerts_service_targetport }}"
+ protocol: TCP
selector:
app: prometheus
@@ -94,10 +101,10 @@
annotations:
service.alpha.openshift.io/serving-cert-secret-name: alertmanager-tls
ports:
- - name: prometheus
- port: "{{ openshift_prometheus_service_port }}"
- targetPort: "{{ openshift_prometheus_alertmanager_service_targetport }}"
- protocol: TCP
+ - name: prometheus
+ port: "{{ openshift_prometheus_service_port }}"
+ targetPort: "{{ openshift_prometheus_alertmanager_service_targetport }}"
+ protocol: TCP
selector:
app: prometheus
@@ -112,12 +119,12 @@
service_name: "{{ item.name }}"
tls_termination: reencrypt
with_items:
- - name: prometheus
- host: "{{ openshift_prometheus_hostname }}"
- - name: alerts
- host: "{{ openshift_prometheus_alerts_hostname }}"
- - name: alertmanager
- host: "{{ openshift_prometheus_alertmanager_hostname }}"
+ - name: prometheus
+ host: "{{ openshift_prometheus_hostname }}"
+ - name: alerts
+ host: "{{ openshift_prometheus_alerts_hostname }}"
+ - name: alertmanager
+ host: "{{ openshift_prometheus_alertmanager_hostname }}"
# Storage
- name: create prometheus pvc
@@ -157,9 +164,9 @@
src: "{{ openshift_prometheus_additional_rules_file }}"
dest: "{{ tempdir }}/prometheus.additional.rules"
when:
- - openshift_prometheus_additional_rules_file is defined
- - openshift_prometheus_additional_rules_file is not none
- - openshift_prometheus_additional_rules_file | trim | length > 0
+ - openshift_prometheus_additional_rules_file is defined
+ - openshift_prometheus_additional_rules_file is not none
+ - openshift_prometheus_additional_rules_file | trim | length > 0
- stat:
path: "{{ tempdir }}/prometheus.additional.rules"
@@ -227,5 +234,5 @@
namespace: "{{ openshift_prometheus_namespace }}"
kind: statefulset
files:
- - "{{ tempdir }}/templates/prometheus.yaml"
+ - "{{ tempdir }}/templates/prometheus.yaml"
delete_after: true
diff --git a/roles/openshift_provisioners/tasks/install_provisioners.yaml b/roles/openshift_provisioners/tasks/install_provisioners.yaml
index 2d1217c74..1be498489 100644
--- a/roles/openshift_provisioners/tasks/install_provisioners.yaml
+++ b/roles/openshift_provisioners/tasks/install_provisioners.yaml
@@ -15,6 +15,13 @@
fail: msg='the openshift_provisioners_efs_aws_secret_access_key variable is required'
when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined
+- name: Ensure that provisioners have nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for the EFS provisioner - '{{ openshift_provisioners_efs_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_provisioners_efs_nodeselector)
+
- name: Install support
include_tasks: install_support.yaml
@@ -34,10 +41,10 @@
- name: Create objects
include_tasks: oc_apply.yaml
vars:
- - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- - namespace: "{{ openshift_provisioners_project }}"
- - file_name: "{{ file.source }}"
- - file_content: "{{ file.content | b64decode | from_yaml }}"
+ - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ - namespace: "{{ openshift_provisioners_project }}"
+ - file_name: "{{ file.source }}"
+ - file_content: "{{ file.content | b64decode | from_yaml }}"
with_items: "{{ object_defs.results }}"
loop_control:
loop_var: file
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 4d06c1872..96fa4a93e 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -1,6 +1,5 @@
---
# do any asserts here
-
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
register: mktemp
@@ -9,8 +8,8 @@
- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
- name: Set service_catalog image facts
set_fact:
@@ -25,20 +24,20 @@
- when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'
block:
- - name: Waiting for netnamespace kube-service-catalog to be ready
- oc_obj:
- kind: netnamespace
- name: kube-service-catalog
- state: list
- register: get_output
- until: not get_output.results.stderr is defined
- retries: 30
- delay: 1
- changed_when: false
-
- - name: Make kube-service-catalog project network global
- command: >
- {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
+ - name: Waiting for netnamespace kube-service-catalog to be ready
+ oc_obj:
+ kind: netnamespace
+ name: kube-service-catalog
+ state: list
+ register: get_output
+ until: not get_output.results.stderr is defined
+ retries: 30
+ delay: 1
+ changed_when: false
+
+ - name: Make kube-service-catalog project network global
+ command: >
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
- include_tasks: generate_certs.yml
@@ -51,7 +50,7 @@
kind: template
namespace: "kube-service-catalog"
files:
- - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+ - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
- oc_process:
create: True
@@ -67,7 +66,7 @@
kind: template
namespace: kube-system
files:
- - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+ - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
- oc_process:
create: True
@@ -132,7 +131,7 @@
kind: daemonset
name: apiserver
files:
- - "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ - "{{ mktemp.stdout }}/service_catalog_api_server.yml"
delete_after: yes
- name: Set Service Catalog API Server service
@@ -141,10 +140,10 @@
namespace: kube-service-catalog
state: present
ports:
- - name: secure
- port: 443
- protocol: TCP
- targetPort: 6443
+ - name: secure
+ port: 443
+ protocol: TCP
+ targetPort: 6443
selector:
app: apiserver
session_affinity: None
@@ -160,7 +159,7 @@
kind: route
name: apiserver
files:
- - "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+ - "{{ mktemp.stdout }}/service_catalog_api_route.yml"
delete_after: yes
## controller manager
@@ -180,7 +179,7 @@
kind: daemonset
name: controller-manager
files:
- - "{{ mktemp.stdout }}/controller_manager.yml"
+ - "{{ mktemp.stdout }}/controller_manager.yml"
delete_after: yes
- name: Set Controller Manager service
@@ -189,9 +188,9 @@
namespace: kube-service-catalog
state: present
ports:
- - port: 6443
- protocol: TCP
- targetPort: 6443
+ - port: 6443
+ protocol: TCP
+ targetPort: 6443
selector:
app: controller-manager
session_affinity: None
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index 7b705c2d4..34af652c2 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -73,13 +73,11 @@ objects:
- name: HEKETI_EXECUTOR
value: ${HEKETI_EXECUTOR}
- name: HEKETI_FSTAB
- value: /var/lib/heketi/fstab
+ value: ${HEKETI_FSTAB}
- name: HEKETI_SNAPSHOT_LIMIT
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
- - name: HEKETI_KUBE_NAMESPACE
- value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
@@ -115,10 +113,10 @@ parameters:
displayName: heketi executor type
description: Set the executor type, kubernetes or ssh
value: kubernetes
-- name: HEKETI_KUBE_NAMESPACE
- displayName: Namespace
- description: Set the namespace where the GlusterFS pods reside
- value: default
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, the file that heketi populates with the bricks it creates
+ value: /var/lib/heketi/fstab
- name: HEKETI_ROUTE
displayName: heketi route name
description: Set the hostname for the route URL
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
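For illustration, processing the new template could yield a claim like the following (the S3_ACCOUNT and PVC names below are hypothetical; only PVC_SIZE and CLUSTER_NAME fall back to the defaults declared above):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gluster-s3-claim                 # ${PVC}, hypothetical value
      labels:
        glusterfs: s3-storage-demo-storage   # CLUSTER_NAME=storage, S3_ACCOUNT=demo
        gluster-s3: storage-demo-pvc
      annotations:
        volume.beta.kubernetes.io/storage-class: "glusterfs-storage"
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 2Gi                       # PVC_SIZE default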
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: s3-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: gluster-s3 container image name
+ required: True
+- name: IMAGE_VERSION
+  displayName: gluster-s3 container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
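The S3 account, user, and password parameters flow straight into the pod environment. A sketch of the rendered container env, with placeholder credentials (all three values hypothetical):

    env:
    - name: S3_ACCOUNT
      value: "demo"
    - name: S3_USER
      value: "demo-user"
    - name: S3_PASSWORD
      value: "changeme"

Note that the password is injected as a plain environment variable rather than via a Secret, so anyone who can read the DeploymentConfig can read it.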
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterblock-provisioner.yml
new file mode 100644
index 000000000..63dd5cce6
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
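The provisioner registers itself under the name given by PROVISIONER_NAME, and StorageClasses select it through their provisioner field. A minimal sketch of a class bound to this provisioner (the resturl value is hypothetical):

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: glusterfs-storage-block
    provisioner: gluster.org/glusterblock     # must match PROVISIONER_NAME
    parameters:
      resturl: "http://heketi-storage.example.com"

The full Jinja template for this class is added below in gluster-block-storageclass.yml.j2.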
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
index 8c5e1ded3..09850a2c2 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -35,6 +35,15 @@ objects:
- name: glusterfs
image: ${IMAGE_NAME}:${IMAGE_VERSION}
imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
volumeMounts:
- name: glusterfs-heketi
mountPath: "/var/lib/heketi"
@@ -83,7 +92,6 @@ objects:
periodSeconds: 25
successThreshold: 1
failureThreshold: 15
- resources: {}
terminationMessagePath: "/dev/termination-log"
volumes:
- name: glusterfs-heketi
@@ -134,3 +142,13 @@ parameters:
displayName: GlusterFS cluster name
description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+  description: Sets the maximum number of block hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+ displayName: Tcmu runner log directory
+  description: Sets the tcmu-runner log directory.
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
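With the defaults above, the rendered glusterfs container gains explicit block tunables and a resource floor (a sketch assuming no parameter overrides):

    env:
    - name: GB_GLFS_LRU_COUNT
      value: "15"                               # max block hosting volumes
    - name: TCMU_LOGDIR
      value: "/var/log/glusterfs/gluster-block"
    resources:
      requests:
        memory: 100Mi
        cpu: 100m

The requests replace the earlier empty resources: {} stanza, which is why that line is deleted further down in the pod spec.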
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
index 61b6a8c13..28cdb2982 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -15,6 +15,7 @@ objects:
name: heketi-${CLUSTER_NAME}
labels:
glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
annotations:
description: Exposes Heketi service
spec:
@@ -30,6 +31,7 @@ objects:
name: ${HEKETI_ROUTE}
labels:
glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
spec:
to:
kind: Service
@@ -40,6 +42,7 @@ objects:
name: heketi-${CLUSTER_NAME}
labels:
glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
annotations:
description: Defines how to deploy Heketi
spec:
@@ -55,6 +58,7 @@ objects:
name: heketi-${CLUSTER_NAME}
labels:
glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
spec:
serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
@@ -69,13 +73,11 @@ objects:
- name: HEKETI_EXECUTOR
value: ${HEKETI_EXECUTOR}
- name: HEKETI_FSTAB
- value: /var/lib/heketi/fstab
+ value: ${HEKETI_FSTAB}
- name: HEKETI_SNAPSHOT_LIMIT
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
- - name: HEKETI_KUBE_NAMESPACE
- value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
@@ -114,10 +116,10 @@ parameters:
displayName: heketi executor type
description: Set the executor type, kubernetes or ssh
value: kubernetes
-- name: HEKETI_KUBE_NAMESPACE
- displayName: Namespace
- description: Set the namespace where the GlusterFS pods reside
- value: default
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the fstab path, the file that heketi populates with the bricks it creates
+ value: /var/lib/heketi/fstab
- name: HEKETI_ROUTE
displayName: heketi route name
description: Set the hostname for the route URL
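As with deploy-heketi above, the fstab path is now overridable at template-processing time. For example, pointing heketi at a different fstab (the path below is hypothetical) would render as:

    env:
    - name: HEKETI_FSTAB
      value: /etc/heketi/fstab    # hypothetical HEKETI_FSTAB override

The new heketi: ${CLUSTER_NAME}-* labels also give each object a second, heketi-specific selector alongside the existing glusterfs: labels.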
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
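A sketch of this template's output for a native-heketi cluster, assuming glusterfs_name=storage, glusterfs_heketi_admin_key set, a hypothetical route hostname, and no default-class annotation:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: glusterfs-storage-block
    provisioner: gluster.org/glusterblock
    parameters:
      resturl: "http://heketi-storage.example.com"   # hypothetical route
      restuser: "admin"
      chapauthenabled: "true"
      hacount: "3"
      restsecretnamespace: "glusterfs"               # hypothetical namespace
      restsecretname: "heketi-storage-admin-secret-block"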
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index ca87807fe..095fb780f 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -3,10 +3,6 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
-{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
- annotations:
- storageclass.kubernetes.io/is-default-class: "true"
-{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
index 579b11bb7..565e9be98 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
@@ -31,6 +31,12 @@
"port" : "{{ glusterfs_heketi_ssh_port }}",
"user" : "{{ glusterfs_heketi_ssh_user }}",
"sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
- }
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
}
}
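Rendered with hypothetical values for the two new role variables (glusterfs_block_host_vol_create=True, glusterfs_block_host_vol_size=100), the tail of heketi.json would look like:

    "_auto_create_block_hosting_volume": "Creates block hosting volumes automatically if none is found or the existing volume is exhausted",
    "auto_create_block_hosting_volume": true,

    "_block_hosting_volume_size": "New block hosting volumes will be created with the size mentioned. This is considered only if auto-create is enabled.",
    "block_hosting_volume_size": 100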
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index 08640d093..ab6613567 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -3,8 +3,8 @@
- name: Set default image variables based on deployment type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
- name: Set openshift_web_console facts
set_fact:
@@ -19,7 +19,7 @@
name: openshift-web-console
state: present
node_selector:
- - ""
+ - ""
register: create_console_project
- name: Make temp directory for web console templates
@@ -37,9 +37,9 @@
src: "{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- - "{{ __console_template_file }}"
- - "{{ __console_rbac_file }}"
- - "{{ __console_config_file }}"
+ - "{{ __console_template_file }}"
+ - "{{ __console_rbac_file }}"
+ - "{{ __console_config_file }}"
# Check if an existing webconsole-config config map exists. If so, use those
# contents so we don't overwrite changes.
@@ -62,69 +62,69 @@
# Generate a new config when a config map is not defined.
- when: existing_config_map_data['webconsole-config.yaml'] is not defined
block:
- # Migrate the previous master-config.yaml asset config if it exists into the new
- # web console config config map.
- - name: Read existing assetConfig in master-config.yaml
- slurp:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- register: master_config_output
-
- - set_fact:
- config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}"
-
- - set_fact:
- cro_plugin_enabled: "{{ config_to_migrate.admissionConfig is defined and config_to_migrate.admissionConfig.pluginConfig is defined and config_to_migrate.admissionConfig.pluginConfig.ClusterResourceOverrides is defined }}"
-
- # Update properties in the config template based on inventory vars when the
- # asset config does not exist.
- - name: Set web console config properties from inventory variables
- yedit:
- src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- edits:
- - key: clusterInfo#consolePublicURL
- # Must have a trailing slash
- value: "{{ openshift.master.public_console_url }}/"
- - key: clusterInfo#masterPublicURL
- value: "{{ openshift.master.public_api_url }}"
- - key: clusterInfo#logoutPublicURL
- value: "{{ openshift.master.logout_url | default('') }}"
- - key: features#inactivityTimeoutMinutes
- value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
- - key: features#clusterResourceOverridesEnabled
- value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
- - key: extensions#scriptURLs
- value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
- - key: extensions#stylesheetURLs
- value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
- - key: extensions#properties
- value: "{{ openshift_web_console_extension_properties | default({}) }}"
- separator: '#'
- state: present
- when: config_to_migrate.assetConfig is not defined
-
- - name: Migrate assetConfig from master-config.yaml
- yedit:
- src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- edits:
- - key: clusterInfo#consolePublicURL
- value: "{{ config_to_migrate.assetConfig.publicURL }}"
- - key: clusterInfo#masterPublicURL
- value: "{{ config_to_migrate.assetConfig.masterPublicURL }}"
- - key: clusterInfo#logoutPublicURL
- value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}"
- - key: clusterInfo#metricsPublicURL
- value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}"
- - key: clusterInfo#loggingPublicURL
- value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}"
- - key: servingInfo#maxRequestsInFlight
- value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}"
- - key: servingInfo#requestTimeoutSeconds
- value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}"
- - key: features#clusterResourceOverridesEnabled
- value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
- separator: '#'
- state: present
- when: config_to_migrate.assetConfig is defined
+ # Migrate the previous master-config.yaml asset config if it exists into the new
+ # web console config config map.
+ - name: Read existing assetConfig in master-config.yaml
+ slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: master_config_output
+
+ - set_fact:
+ config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}"
+
+ - set_fact:
+ cro_plugin_enabled: "{{ config_to_migrate.admissionConfig is defined and config_to_migrate.admissionConfig.pluginConfig is defined and config_to_migrate.admissionConfig.pluginConfig.ClusterResourceOverrides is defined }}"
+
+ # Update properties in the config template based on inventory vars when the
+ # asset config does not exist.
+ - name: Set web console config properties from inventory variables
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: features#inactivityTimeoutMinutes
+ value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+ - key: features#clusterResourceOverridesEnabled
+ value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
+ - key: extensions#scriptURLs
+ value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
+ - key: extensions#stylesheetURLs
+ value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
+ - key: extensions#properties
+ value: "{{ openshift_web_console_extension_properties | default({}) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is not defined
+
+ - name: Migrate assetConfig from master-config.yaml
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ value: "{{ config_to_migrate.assetConfig.publicURL }}"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ config_to_migrate.assetConfig.masterPublicURL }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}"
+ - key: clusterInfo#metricsPublicURL
+ value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}"
+ - key: clusterInfo#loggingPublicURL
+ value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}"
+ - key: servingInfo#maxRequestsInFlight
+ value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}"
+ - key: servingInfo#requestTimeoutSeconds
+ value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}"
+ - key: features#clusterResourceOverridesEnabled
+ value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is defined
- slurp:
src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 9f1d66fb5..d674d24e4 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -1,10 +1,17 @@
---
# Fact setting
+- name: Ensure that Template Service Broker has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Template Service Broker - '{{ template_service_broker_selector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(template_service_broker_selector)
+
- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
- name: set template_service_broker facts
set_fact:
@@ -16,7 +23,7 @@
name: openshift-template-service-broker
state: present
node_selector:
- - ""
+ - ""
- command: mktemp -d /tmp/tsb-ansible-XXXXXX
register: mktemp
@@ -31,10 +38,10 @@
src: "{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- - "{{ __tsb_template_file }}"
- - "{{ __tsb_rbac_file }}"
- - "{{ __tsb_broker_file }}"
- - "{{ __tsb_config_file }}"
+ - "{{ __tsb_template_file }}"
+ - "{{ __tsb_rbac_file }}"
+ - "{{ __tsb_broker_file }}"
+ - "{{ __tsb_config_file }}"
- yedit:
src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}"