-rw-r--r--  .tito/packages/openshift-ansible  2
-rw-r--r--  CONTRIBUTING.md  21
-rw-r--r--  README.md  12
-rw-r--r--  inventory/hosts.localhost  26
-rw-r--r--  openshift-ansible.spec  50
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example  6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml  27
-rw-r--r--  playbooks/gcp/openshift-cluster/build_base_image.yml  3
-rw-r--r--  playbooks/init/base_packages.yml  1
-rw-r--r--  playbooks/init/basic_facts.yml  8
-rw-r--r--  playbooks/openshift-prometheus/private/uninstall.yml  8
-rw-r--r--  playbooks/openshift-prometheus/uninstall.yml  2
-rwxr-xr-x  playbooks/openstack/inventory.py  48
-rw-r--r--  roles/container_runtime/defaults/main.yml  4
-rw-r--r--  roles/container_runtime/templates/docker_storage_setup.j2  4
-rw-r--r--  roles/lib_utils/library/docker_creds.py  4
-rw-r--r--  roles/lib_utils/library/openshift_container_binary_sync.py  2
-rw-r--r--  roles/nuage_master/tasks/etcd_certificates.yml  21
-rw-r--r--  roles/nuage_master/tasks/main.yaml  17
-rwxr-xr-x  roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2  39
-rwxr-xr-x  roles/nuage_master/templates/nuage-master-config-daemonset.j2  9
-rwxr-xr-x  roles/nuage_master/templates/nuage-node-config-daemonset.j2  11
-rw-r--r--  roles/nuage_master/vars/main.yaml  7
-rw-r--r--  roles/openshift_aws/defaults/main.yml  124
-rw-r--r--  roles/openshift_aws/tasks/elb.yml  12
-rw-r--r--  roles/openshift_aws/tasks/master_facts.yml  2
-rw-r--r--  roles/openshift_aws/tasks/provision_elb.yml  1
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  4
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml  23
-rw-r--r--  roles/openshift_management/defaults/main.yml  2
-rw-r--r--  roles/openshift_management/tasks/accounts.yml  6
-rw-r--r--  roles/openshift_management/tasks/main.yml  10
-rw-r--r--  roles/openshift_management/tasks/storage/create_nfs_pvs.yml  14
-rw-r--r--  roles/openshift_management/tasks/storage/nfs.yml  10
-rw-r--r--  roles/openshift_management/tasks/template.yml  38
-rw-r--r--  roles/openshift_management/tasks/validate.yml  2
-rw-r--r--  roles/openshift_management/vars/main.yml  20
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml  2
-rw-r--r--  roles/openshift_node/defaults/main.yml  1
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml  28
-rw-r--r--  roles/openshift_node/templates/multipath.conf.j2  15
-rw-r--r--  roles/openshift_openstack/templates/docker-storage-setup-dm.j2  4
-rw-r--r--  roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2  4
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2  4
-rw-r--r--  roles/openshift_openstack/templates/heat_stack_server.yaml.j2  3
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml  15
-rw-r--r--  roles/openshift_prometheus/tasks/facts.yaml  10
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml  119
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml  4
-rw-r--r--  roles/openshift_prometheus/tasks/uninstall.yaml (renamed from roles/openshift_prometheus/tasks/uninstall_prometheus.yaml)  0
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.j2  92
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.yml.j2  175
-rw-r--r--  roles/openshift_provisioners/defaults/main.yaml  10
-rw-r--r--  roles/openshift_provisioners/tasks/main.yaml  5
-rw-r--r--  roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml  86
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml  79
-rw-r--r--  roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2  27
-rw-r--r--  roles/openshift_service_catalog/templates/sc_view_role_patching.j2  11
-rw-r--r--  roles/openshift_storage_glusterfs/README.md  106
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml  10
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml  26
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml  2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml  2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/glusterfs.conf  5
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2  19
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2  19
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2  19
-rw-r--r--  roles/openshift_version/defaults/main.yml  1
-rw-r--r--  roles/openshift_version/tasks/first_master.yml  6
-rw-r--r--  roles/openshift_web_console/files/console-config.yaml (renamed from files/origin-components/console-config.yaml)  0
-rw-r--r--  roles/openshift_web_console/files/console-rbac-template.yaml (renamed from files/origin-components/console-rbac-template.yaml)  0
-rw-r--r--  roles/openshift_web_console/files/console-template.yaml (renamed from files/origin-components/console-template.yaml)  0
-rw-r--r--  roles/openshift_web_console/tasks/install.yml  9
-rw-r--r--  roles/openshift_web_console/vars/main.yml  2
-rw-r--r--  roles/template_service_broker/files/apiserver-config.yaml (renamed from files/origin-components/apiserver-config.yaml)  0
-rw-r--r--  roles/template_service_broker/files/apiserver-template.yaml (renamed from files/origin-components/apiserver-template.yaml)  0
-rw-r--r--  roles/template_service_broker/files/rbac-template.yaml (renamed from files/origin-components/rbac-template.yaml)  0
-rw-r--r--  roles/template_service_broker/files/template-service-broker-registration.yaml (renamed from files/origin-components/template-service-broker-registration.yaml)  0
-rw-r--r--  roles/template_service_broker/tasks/install.yml  2
-rw-r--r--  roles/template_service_broker/tasks/remove.yml  2
-rw-r--r--  roles/template_service_broker/vars/main.yml  2
81 files changed, 1059 insertions, 467 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 39a49270b..6300e1179 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.28.0 ./
+3.9.0-0.34.0 ./
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1c0fa73ad..ef0a302dc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -74,6 +74,27 @@ If you are new to Git, these links might help:
---
+## Simple all-in-one localhost installation
+```
+git clone https://github.com/openshift/openshift-ansible
+cd openshift-ansible
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
+```
+
+## Development process
+Most changes can be applied by re-running the config playbook. However, while
+the config playbook will run faster the second time through, it's still going
+to take a very long time. As such, you may wish to run a smaller subset of the
+installation playbooks: for instance, the node, master, or hosted playbooks in
+playbooks/openshift-node/config.yml, playbooks/openshift-master/config.yml, or
+playbooks/openshift-hosted/config.yml respectively.
+
+We're actively working to refactor the playbooks into smaller discrete
+components, and we'll be documenting that structure shortly; for now, those are
+the most sensible logical units of work.
+
## Running tests and other verification tasks
We use [`tox`](http://readthedocs.org/docs/tox/) to manage virtualenvs where
diff --git a/README.md b/README.md
index fdedf2f19..609930dcd 100644
--- a/README.md
+++ b/README.md
@@ -74,7 +74,17 @@ Fedora:
dnf install -y ansible pyOpenSSL python-cryptography python-lxml
```
-## OpenShift Installation Documentation:
+## Simple all-in-one localhost Installation
+This assumes that you've installed the base dependencies and you're running on
+Fedora or RHEL.
+```
+git clone https://github.com/openshift/openshift-ansible
+cd openshift-ansible
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
+```
+
+## Complete Production Installation Documentation:
- [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
- [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
diff --git a/inventory/hosts.localhost b/inventory/hosts.localhost
new file mode 100644
index 000000000..41ed309e1
--- /dev/null
+++ b/inventory/hosts.localhost
@@ -0,0 +1,26 @@
+#bare minimum hostfile
+
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+# if your target hosts are Fedora uncomment this
+#ansible_python_interpreter=/usr/bin/python3
+openshift_deployment_type=origin
+openshift_release=3.7
+osm_cluster_network_cidr=10.128.0.0/14
+openshift_portal_net=172.30.0.0/16
+osm_host_subnet_length=9
+# localhost likely doesn't meet the minimum requirements
+openshift_disable_check=disk_availability,memory_availability
+
+[masters]
+localhost ansible_connection=local
+
+[etcd]
+localhost ansible_connection=local
+
+[nodes]
+localhost ansible_connection=local openshift_schedulable=true openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 229d43a41..ab00e9d0f 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.28.0%{?dist}
+Release: 0.34.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -65,9 +65,6 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
mkdir -p docs/example-inventories
cp inventory/hosts.* inventory/README.md docs/example-inventories/
-# openshift-ansible-files install
-cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
-
# openshift-ansible-playbooks install
cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
# remove contiv plabooks
@@ -101,7 +98,6 @@ popd
%doc README*
%license LICENSE
%dir %{_datadir}/ansible/%{name}
-%{_datadir}/ansible/%{name}/files
%{_datadir}/ansible/%{name}/inventory/dynamic
%ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
@@ -204,6 +200,50 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.34.0
+- docker_creds: decode docker_config for py3 only if its a string
+  (vrutkovs@redhat.com)
+- Removing ability to change default cassandra_pvc_prefix based on metrics
+  volume name (ewolinet@redhat.com)
+- Don't deploy the console if disabled or registry subtype (sdodson@redhat.com)
+- [1538960] Correct ability to overried openshift_management_app_template
+  (rteague@redhat.com)
+
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.33.0
+-
+
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.32.0
+- Revert "Revert "use non-deprecated REGISTRY_OPENSHIFT_SERVER_ADDR variable to
+  set the registry hostname"" (bparees@users.noreply.github.com)
+- Rebase Prometheus example for new scrape endpoints and expose alert manager
+  (m.judeikis@gmail.com)
+- Revert "use non-deprecated REGISTRY_OPENSHIFT_SERVER_ADDR variable to set the
+  registry hostname" (bparees@users.noreply.github.com)
+- Bug 1539182: Detect if ClusterResourceOverrides enabled during console
+  install (spadgett@redhat.com)
+- Fix container_runtime variable typo (mgugino@redhat.com)
+- Correct 3.7 to 3.9 upgrade openshift_image_tag (mgugino@redhat.com)
+- Fix misaligned ports for sg,elb,api (mazzystr@gmail.com)
+- Add GPG keys in the base image and don't install docker (ccoleman@redhat.com)
+- Change catalog roles install to use aggregation (jpeeler@redhat.com)
+- Make IP object a string (fabian@fabianism.us)
+- Add kube service ipaddress to no_proxy list (sdodson@redhat.com)
+
+* Sat Jan 27 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.31.0
+- removed references to 'files' dir in spec file (dyocum@redhat.com)
+- files in ansible roles do not need to have the path specified to them when
+  referenced by a builtin module, i.e., copy: (dyocum@redhat.com)
+- moving files to their correct <role>/files dir for the openshift_web_console
+  and template_service_broker roles (dyocum@redhat.com)
+
+* Fri Jan 26 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.30.0
+- Removing dependency on the extra stroage device. (kwoodson@redhat.com)
+
+* Fri Jan 26 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.29.0
+- Add prometheus annotations to console service (spadgett@redhat.com)
+- Add resource requests to console template (spadgett@redhat.com)
+- ignore 'users' field in oc_group module (jdiaz@redhat.com)
+
* Fri Jan 26 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.28.0
- Updating deprecations to use callback plugin (ewolinet@redhat.com)
- Run console pods on the master (spadgett@redhat.com)
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index a1a8a5b08..78484fdbd 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -21,6 +21,12 @@ openshift_release: # v3.7
# This will be dependent on the version provided by the yum repository
openshift_pkg_version: # -3.7.0
+# OpenShift api port
+# Fulfills a chicken/egg scenario with how Ansible treats host inventory file
+# and extra_vars. This is used for SecurityGroups, ELB Listeners as well as
+# an override to installer inventory openshift_master_api_port key
+# openshift_master_api_port: 8443
+
# specify a clusterid
# This value is also used as the default value for many other components.
#openshift_aws_clusterid: default
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 4e4ed54fc..fe1fdefff 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -25,10 +25,18 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
openshift_release: '3.8'
- _requested_pkg_version: "{{openshift_pkg_version if openshift_pkg_version is defined else omit }}"
- _requested_image_tag: "{{openshift_image_tag if openshift_image_tag is defined else omit }}"
+ _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+ _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+ l_double_upgrade_cp: True
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ - name: set l_force_image_tag_to_version = True
+ set_fact:
+ # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
+ # to match 3.8 version
+ l_force_image_tag_to_version: True
+ when: _requested_image_tag is defined
+
- import_playbook: ../pre/config.yml
# These vars a meant to exclude oo_nodes from plays that would otherwise include
# them by default.
@@ -69,7 +77,20 @@
openshift_upgrade_min: '3.8'
openshift_release: '3.9'
openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
- openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
+ # Set the user's specified image_tag for 3.9 upgrade if it was provided.
+ - set_fact:
+ openshift_image_tag: "{{ _requested_image_tag }}"
+ l_force_image_tag_to_version: False
+ when: _requested_image_tag is defined
+ # If the user didn't specify an image_tag, we need to force update image_tag
+ # because it will have already been set during 3.8. If we aren't running
+ # a double upgrade, then we can preserve image_tag because it will still
+ # be the user provided value.
+ - set_fact:
+ l_force_image_tag_to_version: True
+ when:
+ - l_double_upgrade_cp is defined and l_double_upgrade_cp
+ - _requested_image_tag is not defined
- import_playbook: ../pre/config.yml
# These vars a meant to exclude oo_nodes from plays that would otherwise include
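The two added set_fact blocks encode when the 3.9 stage must force openshift_image_tag back to the release version after the intermediate 3.8 hop. A rough Python sketch of that decision follows; the function and variable names are hypothetical stand-ins for _requested_image_tag and l_double_upgrade_cp.

```python
# Sketch of when the 3.9 step forces image_tag back to the release version.
# requested_tag stands in for _requested_image_tag (user-supplied openshift_image_tag),
# double_upgrade for l_double_upgrade_cp (set when the 3.8 intermediate step ran).
def force_image_tag_to_version(requested_tag, double_upgrade):
    if requested_tag is not None:
        # the user's tag is restored for 3.9, so nothing needs forcing
        return False
    # no user tag: image_tag still holds the 3.8 value after a double upgrade
    return double_upgrade

print(force_image_tag_to_version("v3.9.0-alpha.4", True))   # False
print(force_image_tag_to_version(None, True))               # True  -> recompute tag for 3.9
print(force_image_tag_to_version(None, False))              # False -> tag left untouched
```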
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
index 75d0ddf9d..8e9b0024a 100644
--- a/playbooks/gcp/openshift-cluster/build_base_image.yml
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -90,6 +90,8 @@
repo_gpgcheck: no
state: present
when: ansible_os_family == "RedHat"
+ - name: Accept GPG keys for the repos
+ command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static'
- name: Install qemu-user-static
package:
name: qemu-user-static
@@ -121,7 +123,6 @@
with_items:
# required by Ansible
- PyYAML
- - docker
- google-compute-engine
- google-compute-engine-init
- google-config
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index e1052fb6c..0a730a88a 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -16,6 +16,7 @@
- iproute
- "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else omit }}"
- yum-utils
register: result
until: result is succeeded
diff --git a/playbooks/init/basic_facts.yml b/playbooks/init/basic_facts.yml
index 06a4e7291..a9bf06693 100644
--- a/playbooks/init/basic_facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -67,3 +67,11 @@
first_master_client_binary: "{{ openshift_client_binary }}"
#Some roles may require this to be set for first master
openshift_client_binary: "{{ openshift_client_binary }}"
+
+- name: Disable web console if required
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ openshift_web_console_install: False
+ when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
diff --git a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml
new file mode 100644
index 000000000..2df39c2a8
--- /dev/null
+++ b/playbooks/openshift-prometheus/private/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall Prometheus
+ hosts: masters[0]
+ tasks:
+ - name: Run the Prometheus Uninstall Role Tasks
+ include_role:
+ name: openshift_prometheus
+ tasks_from: uninstall
diff --git a/playbooks/openshift-prometheus/uninstall.yml b/playbooks/openshift-prometheus/uninstall.yml
new file mode 100644
index 000000000..c92ade786
--- /dev/null
+++ b/playbooks/openshift-prometheus/uninstall.yml
@@ -0,0 +1,2 @@
+---
+- import_playbook: private/uninstall.yml
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
index 76e658eb7..d5a8c3e24 100755
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -15,18 +15,10 @@ import json
import shade
-def build_inventory():
- '''Build the dynamic inventory.'''
- cloud = shade.openstack_cloud()
-
+def base_openshift_inventory(cluster_hosts):
+ '''Set the base openshift inventory.'''
inventory = {}
- # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
- # environment variable.
- cluster_hosts = [
- server for server in cloud.list_servers()
- if 'metadata' in server and 'clusterid' in server.metadata]
-
masters = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'master']
@@ -67,6 +59,34 @@ def build_inventory():
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
+ return inventory
+
+
+def get_docker_storage_mountpoints(volumes):
+ '''Check volumes to see if they're being used for docker storage'''
+ docker_storage_mountpoints = {}
+ for volume in volumes:
+ if volume.metadata.get('purpose') == "openshift_docker_storage":
+ for attachment in volume.attachments:
+ if attachment.server_id in docker_storage_mountpoints:
+ docker_storage_mountpoints[attachment.server_id].append(attachment.device)
+ else:
+ docker_storage_mountpoints[attachment.server_id] = [attachment.device]
+ return docker_storage_mountpoints
+
+
+def build_inventory():
+ '''Build the dynamic inventory.'''
+ cloud = shade.openstack_cloud()
+
+ # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+ # environment variable.
+ cluster_hosts = [
+ server for server in cloud.list_servers()
+ if 'metadata' in server and 'clusterid' in server.metadata]
+
+ inventory = base_openshift_inventory(cluster_hosts)
+
for server in cluster_hosts:
if 'group' in server.metadata:
group = server.metadata.group
@@ -76,6 +96,9 @@ def build_inventory():
inventory['_meta'] = {'hostvars': {}}
+ # cinder volumes used for docker storage
+ docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes())
+
for server in cluster_hosts:
ssh_ip_address = server.public_v4 or server.private_v4
hostvars = {
@@ -111,6 +134,11 @@ def build_inventory():
if node_labels:
hostvars['openshift_node_labels'] = node_labels
+ # check for attached docker storage volumes
+ if 'os-extended-volumes:volumes_attached' in server:
+ if server.id in docker_storage_mountpoints:
+ hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id])
+
inventory['_meta']['hostvars'][server.name] = hostvars
return inventory
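The new get_docker_storage_mountpoints() helper groups Cinder attachment devices by server id, and the resulting hostvar is the space-joined device list. A minimal standalone sketch of that grouping, using hypothetical dict-based stand-ins for the objects shade's cloud.list_volumes() returns:

```python
# Minimal sketch of the grouping performed by get_docker_storage_mountpoints().
# The volume/attachment dicts are hypothetical stand-ins for shade volume objects.
volumes = [
    {"metadata": {"purpose": "openshift_docker_storage"},
     "attachments": [{"server_id": "node-1", "device": "/dev/vdb"},
                     {"server_id": "node-1", "device": "/dev/vdc"}]},
    {"metadata": {"purpose": "glance"},   # ignored: wrong purpose
     "attachments": [{"server_id": "node-2", "device": "/dev/vdb"}]},
]

mountpoints = {}
for volume in volumes:
    if volume["metadata"].get("purpose") == "openshift_docker_storage":
        for attachment in volume["attachments"]:
            mountpoints.setdefault(attachment["server_id"], []).append(attachment["device"])

print(mountpoints)                      # {'node-1': ['/dev/vdb', '/dev/vdc']}
print(" ".join(mountpoints["node-1"]))  # value of the docker_storage_mountpoints hostvar
```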
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index 8203d15f5..7397e2bec 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -64,7 +64,7 @@ docker_storage_setup_options:
root_lv_mount_path: "{{ docker_storage_path }}"
docker_storage_extra_options:
- "--storage-opt overlay2.override_kernel_check=true"
-- "--storage-opt overlay2.size={{ docker_storage_size }}"
+- "{{ '--storage-opt overlay2.size=' ~ docker_storage_size if container_runtime_docker_storage_setup_device is defined and container_runtime_docker_storage_setup_device != '' else '' }}"
- "--graph={{ docker_storage_path}}"
@@ -117,7 +117,7 @@ l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio
# ----------------------- #
l_crt_docker_image_dict:
Fedora: "registry.fedoraproject.org/latest/docker"
- Centos: "registry.centos.org/projectatomic/docker"
+ CentOS: "registry.centos.org/projectatomic/docker"
RedHat: "registry.access.redhat.com/openshift3/container-engine"
openshift_docker_image_tag_default: "latest"
diff --git a/roles/container_runtime/templates/docker_storage_setup.j2 b/roles/container_runtime/templates/docker_storage_setup.j2
index b056087e0..ec540ea44 100644
--- a/roles/container_runtime/templates/docker_storage_setup.j2
+++ b/roles/container_runtime/templates/docker_storage_setup.j2
@@ -2,6 +2,7 @@
# /usr/lib/docker-storage-setup/docker-storage-setup.
#
# For more details refer to "man docker-storage-setup"
+{% if container_runtime_docker_storage_setup_device is defined and container_runtime_docker_storage_setup_device != '' %}
DEVS={{ container_runtime_docker_storage_setup_device }}
VG={{ docker_storage_setup_options.vg }}
DATA_SIZE={{ docker_storage_setup_options.data_size }}
@@ -9,4 +10,7 @@ STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}"
CONTAINER_ROOT_LV_NAME="{{ docker_storage_setup_options.root_lv_name }}"
CONTAINER_ROOT_LV_SIZE="{{ docker_storage_setup_options.root_lv_size }}"
CONTAINER_ROOT_LV_MOUNT_PATH="{{ docker_storage_setup_options.root_lv_mount_path }}"
+{% else %}
+STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}"
+{% endif %}
EXTRA_STORAGE_OPTIONS="{{ docker_storage_extra_options | join(' ') }}"
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
index b94c0b779..936fb1c38 100644
--- a/roles/lib_utils/library/docker_creds.py
+++ b/roles/lib_utils/library/docker_creds.py
@@ -148,10 +148,12 @@ def update_config(docker_config, registry, username, password):
def write_config(module, docker_config, dest):
'''Write updated credentials into dest/config.json'''
+ if not isinstance(docker_config, dict):
+ docker_config = docker_config.decode()
conf_file_path = os.path.join(dest, 'config.json')
try:
with open(conf_file_path, 'w') as conf_file:
- json.dump(docker_config.decode(), conf_file, indent=8)
+ json.dump(docker_config, conf_file, indent=8)
except IOError as ioerror:
result = {'failed': True,
'changed': False,
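The guard added to write_config() exists because, under Python 3, a base64-decoded config arrives as bytes and needs .decode() before it can be written, while a config that is already a parsed dict must be left alone. A small sketch of that type check; the sample payloads are hypothetical:

```python
import base64
import json

# On Python 3, b64decode() returns bytes, so the JSON text must be decoded to str
# before writing; an already-parsed dict must not be touched.
raw = base64.b64encode(json.dumps({"auths": {}}).encode())

for docker_config in (base64.b64decode(raw), {"auths": {}}):
    if not isinstance(docker_config, dict):
        docker_config = docker_config.decode()   # bytes -> str on py3
    print(type(docker_config).__name__, docker_config)
```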
diff --git a/roles/lib_utils/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py
index 440b8ec28..efdfcf1c7 100644
--- a/roles/lib_utils/library/openshift_container_binary_sync.py
+++ b/roles/lib_utils/library/openshift_container_binary_sync.py
@@ -107,7 +107,7 @@ class BinarySyncer(object):
self._sync_binary('oc')
# Ensure correct symlinks created:
- self._sync_symlink('kubectl', 'openshift')
+ self._sync_symlink('kubectl', 'oc')
# Remove old oadm binary
if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
diff --git a/roles/nuage_master/tasks/etcd_certificates.yml b/roles/nuage_master/tasks/etcd_certificates.yml
new file mode 100644
index 000000000..99ec27f91
--- /dev/null
+++ b/roles/nuage_master/tasks/etcd_certificates.yml
@@ -0,0 +1,21 @@
+---
+- name: Generate openshift etcd certs
+ become: yes
+ include_role:
+ name: etcd
+ tasks_from: client_certificates
+ vars:
+ etcd_cert_prefix: nuageEtcd-
+ etcd_cert_config_dir: "{{ cert_output_dir }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-nuage-{{ openshift.common.hostname }}"
+
+
+- name: Error if etcd certs are not copied
+ stat:
+ path: "{{ item }}"
+ with_items:
+ - "{{ cert_output_dir }}/nuageEtcd-ca.crt"
+ - "{{ cert_output_dir }}/nuageEtcd-client.crt"
+ - "{{ cert_output_dir }}/nuageEtcd-client.key"
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index 29e16b6f8..a1781dc56 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -81,6 +81,7 @@
- nuage.key
- nuage.kubeconfig
+- include_tasks: etcd_certificates.yml
- include_tasks: certificates.yml
- name: Install Nuage VSD user certificate
@@ -99,7 +100,16 @@
become: yes
template: src=nuage-node-config-daemonset.j2 dest=/etc/nuage-node-config-daemonset.yaml owner=root mode=0644
-- name: Add the service account to the privileged scc to have root permissions
+- name: Create Nuage Infra Pod daemon set yaml file
+ become: yes
+ template: src=nuage-infra-pod-config-daemonset.j2 dest=/etc/nuage-infra-pod-config-daemonset.yaml owner=root mode=0644
+
+- name: Add the service account to the privileged scc to have root permissions for kube-system
+ shell: oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:daemon-set-controller
+ ignore_errors: true
+ when: inventory_hostname == groups.oo_first_master.0
+
+- name: Add the service account to the privileged scc to have root permissions for openshift-infra
shell: oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-infra:daemonset-controller
ignore_errors: true
when: inventory_hostname == groups.oo_first_master.0
@@ -114,6 +124,11 @@
ignore_errors: true
when: inventory_hostname == groups.oo_first_master.0
+- name: Spawn Nuage Infra daemon sets pod
+ shell: oc create -f /etc/nuage-infra-pod-config-daemonset.yaml
+ ignore_errors: true
+ when: inventory_hostname == groups.oo_first_master.0
+
- name: Restart daemons
command: /bin/true
notify:
diff --git a/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2
new file mode 100755
index 000000000..534a1517f
--- /dev/null
+++ b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2
@@ -0,0 +1,39 @@
+# This manifest installs Nuage Infra pod on
+# each worker node in an Openshift cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: nuage-infra-ds
+ namespace: kube-system
+ labels:
+ k8s-app: nuage-infra-ds
+spec:
+ selector:
+ matchLabels:
+ k8s-app: nuage-infra-ds
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: nuage-infra-ds
+ spec:
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ operator: Exists
+ containers:
+ # This container spawns a Nuage Infra pod
+ # on each worker node
+ - name: install-nuage-infra
+ image: nuage/infra:{{ nuage_infra_container_image_version }}
+ command: ["/install-nuage-infra-pod.sh"]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /var/log
+ name: log-dir
+ volumes:
+ - name: log-dir
+ hostPath:
+ path: /var/log
diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
index 7be5d6743..3543eeb56 100755
--- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
@@ -37,11 +37,14 @@ data:
nuageMonServer:
URL: 0.0.0.0:9443
certificateDirectory: {{ nuage_master_crt_dir }}
+ clientCA: ""
+ serverCertificate: ""
+ serverKey: ""
# etcd config required for HA
etcdClientConfig:
- ca: {{ nuage_master_crt_dir }}/nuageMonCA.crt
- certFile: {{ nuage_master_crt_dir }}/nuageMonServer.crt
- keyFile: {{ nuage_master_crt_dir }}/master.etcd-client.key
+ ca: {{ nuage_master_crt_dir }}/nuageEtcd-ca.crt
+ certFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.crt
+ keyFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.key
urls:
{% for etcd_url in openshift.master.etcd_urls %}
- {{ etcd_url }}
diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
index 6a1267d94..996a2d2b0 100755
--- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
@@ -61,6 +61,8 @@ spec:
selector:
matchLabels:
k8s-app: nuage-cni-ds
+ updateStrategy:
+ type: RollingUpdate
template:
metadata:
labels:
@@ -104,6 +106,8 @@ spec:
- mountPath: /var/log
name: cni-log-dir
- mountPath: {{ nuage_node_config_dsets_mount_dir }}
+ name: var-usr-share-dir
+ - mountPath: /usr/share/
name: usr-share-dir
volumes:
- name: cni-bin-dir
@@ -121,9 +125,12 @@ spec:
- name: cni-log-dir
hostPath:
path: /var/log
- - name: usr-share-dir
+ - name: var-usr-share-dir
hostPath:
path: {{ nuage_node_config_dsets_mount_dir }}
+ - name: usr-share-dir
+ hostPath:
+ path: /usr/share/
---
@@ -164,7 +171,7 @@ spec:
- name: NUAGE_PLATFORM
value: '"kvm, k8s"'
- name: NUAGE_K8S_SERVICE_IPV4_SUBNET
- value: '192.168.0.0\/16'
+ value: '172.30.0.0\/16'
- name: NUAGE_NETWORK_UPLINK_INTF
value: "eth0"
volumeMounts:
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index 114514d7c..5045e1cc5 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -26,9 +26,10 @@ nuage_master_config_dsets_mount_dir: /usr/share/
nuage_node_config_dsets_mount_dir: /usr/share/
nuage_cni_bin_dsets_mount_dir: /opt/cni/bin
nuage_cni_netconf_dsets_mount_dir: /etc/cni/net.d
-nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.1.1') }}"
-nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.1.1') }}"
-nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.1.1') }}"
+nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.2.1') }}"
+nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.2.1') }}"
+nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.2.1') }}"
+nuage_infra_container_image_version: "{{ nuage_infra_image_version | default('v5.2.1') }}"
api_server_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
nuage_vport_mtu: "{{ nuage_interface_mtu | default('1460') }}"
master_host_type: "{{ master_base_host_type | default('is_rhel_server') }}"
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index e14d57702..178e0849c 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -42,60 +42,77 @@ openshift_aws_ami_tags:
openshift_aws_s3_mode: create
openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
-openshift_aws_elb_health_check:
- ping_protocol: tcp
- ping_port: 443
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
-
openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
-openshift_aws_elb_name_dict:
- master:
- external: "{{ openshift_aws_elb_basename }}-master-external"
- internal: "{{ openshift_aws_elb_basename }}-master-internal"
- infra:
- external: "{{ openshift_aws_elb_basename }}-infra"
-
-openshift_aws_elb_idle_timout: 400
-
-openshift_aws_elb_cert_arn: ''
openshift_aws_elb_dict:
master:
external:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: ssl
- instance_port: 443
- - protocol: ssl
- load_balancer_port: 443
- instance_protocol: ssl
- instance_port: 443
- # ssl certificate required for https or ssl
- ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+ cross_az_load_balancing: False
+ health_check:
+ ping_protocol: tcp
+ ping_port: "{{ openshift_master_api_port | default(8443) }}"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ idle_timout: 400
+ listeners:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: ssl
+ instance_port: "{{ openshift_master_api_port | default(8443) }}"
+ - protocol: ssl
+ load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
+ instance_protocol: ssl
+ instance_port: "{{ openshift_master_api_port | default(8443) }}"
+ ssl_certificate_id: ''
+ name: "{{ openshift_aws_elb_basename }}-master-external"
+ tags: "{{ openshift_aws_kube_tags }}"
internal:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: tcp
- instance_port: 80
- - protocol: tcp
- load_balancer_port: 443
- instance_protocol: tcp
- instance_port: 443
+ cross_az_load_balancing: False
+ health_check:
+ ping_protocol: tcp
+ ping_port: "{{ openshift_master_api_port | default(8443) }}"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ idle_timout: 400
+ listeners:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 80
+ - protocol: tcp
+ load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
+ instance_protocol: tcp
+ instance_port: "{{ openshift_master_api_port | default(8443) }}"
+ name: "{{ openshift_aws_elb_basename }}-master-internal"
+ tags: "{{ openshift_aws_kube_tags }}"
infra:
external:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: tcp
- instance_port: 443
- proxy_protocol: True
- - protocol: tcp
- load_balancer_port: 443
- instance_protocol: tcp
- instance_port: 443
- proxy_protocol: True
+ cross_az_load_balancing: False
+ health_check:
+ ping_protocol: tcp
+ ping_port: 443
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ idle_timout: 400
+ listeners:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
+ - protocol: tcp
+ load_balancer_port: 443
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
+ name: "{{ openshift_aws_elb_basename }}-infra"
+ tags: "{{ openshift_aws_kube_tags }}"
openshift_aws_node_group_config_master_volumes:
- device_name: /dev/sda1
@@ -172,7 +189,7 @@ openshift_aws_master_group_config:
iam_role: "{{ openshift_aws_iam_role_name }}"
policy_name: "{{ openshift_aws_iam_role_policy_name }}"
policy_json: "{{ openshift_aws_iam_role_policy_json }}"
- elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}"
+ elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
openshift_aws_node_group_config:
# The 'compute' key is always required here.
@@ -205,10 +222,7 @@ openshift_aws_node_group_config:
iam_role: "{{ openshift_aws_iam_role_name }}"
policy_name: "{{ openshift_aws_iam_role_policy_name }}"
policy_json: "{{ openshift_aws_iam_role_policy_json }}"
- elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}"
-
-openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
-openshift_aws_elb_az_load_balancing: False
+ elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}"
# build_instance_tags is a custom filter in role lib_utils
openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
@@ -253,8 +267,8 @@ openshift_aws_node_security_groups:
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
- from_port: 443
- to_port: 443
+ from_port: "{{ openshift_master_api_port | default(8443) }}"
+ to_port: "{{ openshift_master_api_port | default(8443) }}"
cidr_ip: 0.0.0.0/0
compute:
name: "{{ openshift_aws_clusterid }}_compute"
@@ -268,8 +282,8 @@ openshift_aws_node_security_groups:
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
- from_port: 443
- to_port: 443
+ from_port: "{{ openshift_master_api_port | default(8443) }}"
+ to_port: "{{ openshift_master_api_port | default(8443) }}"
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 30000
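With the ELB names folded into openshift_aws_elb_dict, the elbs: entries now pull them out with Ansible's json_query filter, which is backed by the jmespath library. A sketch of the same JMESPath expression evaluated directly in Python against a trimmed-down, hypothetical copy of the dict:

```python
import jmespath

# Trimmed-down stand-in for openshift_aws_elb_dict (names hard-coded for clarity).
elb_dict = {
    "master": {
        "external": {"name": "mycluster-master-external"},
        "internal": {"name": "mycluster-master-internal"},
    },
    "infra": {
        "external": {"name": "mycluster-infra"},
    },
}

# Same expressions the role passes to json_query: collect the ELB names per tier.
print(jmespath.search("master.[*][0][*].name", elb_dict))
# -> ['mycluster-master-external', 'mycluster-master-internal']
print(jmespath.search("infra.[*][0][*].name", elb_dict))
# -> ['mycluster-infra']
```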
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 6f0028a3d..d8257cf31 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -5,18 +5,18 @@
- name: "Create ELB {{ l_elb_dict_item.key }}"
ec2_elb_lb:
- name: "{{ l_openshift_aws_elb_name_dict[l_elb_dict_item.key][item.key] }}"
+ name: "{{ item.value.name }}"
state: present
- cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}"
+ cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}"
security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}"
- idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+ idle_timeout: "{{ item.value.idle_timout }}"
region: "{{ openshift_aws_region }}"
subnets:
- "{{ subnetout.subnets[0].id }}"
- health_check: "{{ openshift_aws_elb_health_check }}"
- listeners: "{{ item.value }}"
+ health_check: "{{ item.value.health_check }}"
+ listeners: "{{ item.value.listeners }}"
scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}"
- tags: "{{ openshift_aws_elb_tags }}"
+ tags: "{{ item.value.tags }}"
wait: True
register: new_elb
with_dict: "{{ l_elb_dict_item.value }}"
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
index 530b0134d..c2e362acd 100644
--- a/roles/openshift_aws/tasks/master_facts.yml
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -3,7 +3,7 @@
ec2_elb_facts:
region: "{{ openshift_aws_region }}"
names:
- - "{{ openshift_aws_elb_name_dict['master']['internal'] }}"
+ - "{{ openshift_aws_elb_dict['master']['internal']['name'] }}"
delegate_to: localhost
register: elbs
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
index a52f63bd5..fcc49c3ea 100644
--- a/roles/openshift_aws/tasks/provision_elb.yml
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -10,6 +10,5 @@
with_dict: "{{ openshift_aws_elb_dict }}"
vars:
l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
- l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
loop_control:
loop_var: l_elb_dict_item
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index d6d31effd..452cc4ef6 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -15,8 +15,10 @@ import os
import yaml
import struct
import socket
+import ipaddress
from distutils.util import strtobool
from distutils.version import LooseVersion
+from ansible.module_utils.six import u
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser
@@ -1146,6 +1148,8 @@ def set_proxy_facts(facts):
if 'no_proxy_internal_hostnames' in common:
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
# We always add local dns domain and ourselves no matter what
+ kube_svc_ip = str(ipaddress.ip_network(u(common['portal_net']))[1])
+ common['no_proxy'].append(kube_svc_ip)
common['no_proxy'].append('.' + common['dns_domain'])
common['no_proxy'].append('.svc')
common['no_proxy'].append(common['hostname'])
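The two added lines compute the kubernetes service VIP, i.e. the first host address of openshift_portal_net, and append it to no_proxy. A quick check of what ipaddress.ip_network(...)[1] yields for the default portal net used elsewhere in this change:

```python
import ipaddress

# openshift_portal_net defaults to 172.30.0.0/16; the kubernetes service VIP is
# the first host address in that network (index [0] is the network address itself).
portal_net = u"172.30.0.0/16"
kube_svc_ip = str(ipaddress.ip_network(portal_net)[1])
print(kube_svc_ip)  # 172.30.0.1 -- appended to no_proxy alongside .svc and the hostname
```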
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index e4883bfa0..1b4bdb11f 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -71,10 +71,17 @@
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
when: openshift_logging_es_pvc_prefix == ""
+# Using this module for setting this fact because otherwise we were getting a value of "" trying to
+# use default() in the set_fact after this which caused us to not correctly evaluate
+# openshift_logging_elasticsearch_storage_type
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ elasticsearch_storage_type: openshift_logging_elasticsearch_storage_type
+
- set_fact:
- elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}"
+ default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir' }}"
-# We don't allow scaling down of ES nodes currently
- include_role:
name: openshift_logging_elasticsearch
vars:
@@ -85,7 +92,8 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}"
+ openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
@@ -112,7 +120,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
@@ -133,7 +141,7 @@
when: openshift_logging_es_ops_pvc_prefix == ""
- set_fact:
- elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir') }}"
+ default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir' }}"
when:
- openshift_logging_use_ops | bool
@@ -147,7 +155,8 @@
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}"
+ openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
@@ -189,7 +198,7 @@
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
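The reworked storage handling resolves the type in three tiers: an explicit openshift_logging_elasticsearch_storage_type from the inventory (surfaced via conditional_set_fact), otherwise the kind of the existing DC's elasticsearch-storage volume, otherwise the computed default. A rough Python sketch of that precedence with hypothetical inputs:

```python
# Sketch of the storage-type precedence now applied per existing ES deployment.
# 'explicit' stands in for openshift_logging_elasticsearch_storage_type,
# 'volume' for the existing DC's elasticsearch-storage volume definition.
def resolve_storage_type(explicit, volume, pvc_dynamic, storage_kind, pvc_size):
    if explicit:                              # inventory override wins
        return explicit
    if "persistentVolumeClaim" in volume:     # keep whatever the DC already uses
        return "pvc"
    if "hostPath" in volume:
        return "hostmount"
    if "emptyDir" in volume:
        return "emptydir"
    # default_elasticsearch_storage_type
    return "pvc" if (pvc_dynamic or storage_kind == "nfs" or pvc_size) else "emptydir"

print(resolve_storage_type(None, {"hostPath": {"path": "/srv/es"}}, False, "", ""))   # hostmount
print(resolve_storage_type(None, {}, True, "", ""))                                   # pvc
print(resolve_storage_type("emptydir", {"persistentVolumeClaim": {}}, True, "", ""))  # emptydir
```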
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index b5e234b7f..57bc97e3e 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -15,6 +15,8 @@ openshift_management_pod_rollout_retries: 30
#
# Choose 'miq-template' for a podified database install
# Choose 'miq-template-ext-db' for an external database install
+# TODO: Swap this var declaration once CFME is fully supported
+#openshift_management_app_template: "{{ 'cfme-template' if openshift_deployment_type == 'openshift-enterprise' else 'miq-template' }}"
openshift_management_app_template: miq-template
# If you are using the miq-template-ext-db template then you must add
# the required database parameters to the
diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml
index e45ea8d43..80318fec0 100644
--- a/roles/openshift_management/tasks/accounts.yml
+++ b/roles/openshift_management/tasks/accounts.yml
@@ -5,14 +5,14 @@
oc_serviceaccount:
namespace: "{{ openshift_management_project }}"
state: present
- name: "{{ openshift_management_flavor_short }}{{ item.name }}"
+ name: "{{ __openshift_management_flavor_short }}{{ item.name }}"
with_items:
- "{{ __openshift_system_account_sccs }}"
- name: Ensure the CFME system accounts have all the required SCCs
oc_adm_policy_user:
namespace: "{{ openshift_management_project }}"
- user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}"
+ user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}"
resource_kind: scc
resource_name: "{{ item.resource_name }}"
with_items:
@@ -21,7 +21,7 @@
- name: Ensure the CFME system accounts have the required roles
oc_adm_policy_user:
namespace: "{{ openshift_management_project }}"
- user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}"
+ user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}"
resource_kind: role
resource_name: "{{ item.resource_name }}"
with_items:
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index c4b204b98..5209eba56 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -71,15 +71,15 @@
# CREATE APP
- name: Note the correct ext-db template name
set_fact:
- openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db"
+ openshift_management_template_name: "{{ __openshift_management_flavor }}-ext-db"
when:
- - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
+ - __openshift_management_use_ext_db
- name: Note the correct podified db template name
set_fact:
- openshift_management_template_name: "{{ openshift_management_flavor }}"
+ openshift_management_template_name: "{{ __openshift_management_flavor }}"
when:
- - openshift_management_app_template in ['miq-template', 'cfme-template']
+ - not __openshift_management_use_ext_db
- name: Ensure the Management App is created
oc_process:
@@ -89,7 +89,7 @@
params: "{{ openshift_management_template_parameters }}"
- name: Wait for the app to come up. May take several minutes, 30s check intervals, {{ openshift_management_pod_rollout_retries }} retries
- command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}"
+ command: "oc logs {{ __openshift_management_flavor }}-0 -n {{ openshift_management_project }}"
register: app_seeding_logs
until: app_seeding_logs.stdout.find('Server starting complete') != -1
delay: 30
diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
index d1b9a8d5c..1f8cac6c6 100644
--- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
+++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
@@ -12,7 +12,7 @@
when:
- openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined
-- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+- when: not __openshift_management_use_ext_db
block:
- name: Note the DB PV Size from Template Parameters
set_fact:
@@ -31,7 +31,7 @@
namespace: "{{ openshift_management_project }}"
state: list
kind: pv
- name: "{{ openshift_management_flavor_short }}-app"
+ name: "{{ __openshift_management_flavor_short }}-app"
register: miq_app_pv_check
- name: Check if the Management DB PV has been created
@@ -39,15 +39,15 @@
namespace: "{{ openshift_management_project }}"
state: list
kind: pv
- name: "{{ openshift_management_flavor_short }}-db"
+ name: "{{ __openshift_management_flavor_short }}-db"
register: miq_db_pv_check
when:
- - openshift_management_app_template in ['miq-template', 'cfme-template']
+ - not __openshift_management_use_ext_db
- name: Ensure the Management App PV is created
oc_process:
namespace: "{{ openshift_management_project }}"
- template_name: "{{ openshift_management_flavor }}-app-pv"
+ template_name: "{{ __openshift_management_flavor }}-app-pv"
create: True
params:
PV_SIZE: "{{ openshift_management_app_pv_size }}"
@@ -58,12 +58,12 @@
- name: Ensure the Management DB PV is created
oc_process:
namespace: "{{ openshift_management_project }}"
- template_name: "{{ openshift_management_flavor }}-db-pv"
+ template_name: "{{ __openshift_management_flavor }}-db-pv"
create: True
params:
PV_SIZE: "{{ openshift_management_db_pv_size }}"
BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}"
NFS_HOST: "{{ openshift_management_nfs_server }}"
when:
- - openshift_management_app_template in ['miq-template', 'cfme-template']
+ - not __openshift_management_use_ext_db
- miq_db_pv_check.results.results == [{}]
diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml
index 9e3a4d43a..4a00efb1d 100644
--- a/roles/openshift_management/tasks/storage/nfs.yml
+++ b/roles/openshift_management/tasks/storage/nfs.yml
@@ -17,8 +17,8 @@
tasks_from: create_export
vars:
l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
- l_nfs_export_config: "{{ openshift_management_flavor_short }}"
- l_nfs_export_name: "{{ openshift_management_flavor_short }}-app"
+ l_nfs_export_config: "{{ __openshift_management_flavor_short }}"
+ l_nfs_export_name: "{{ __openshift_management_flavor_short }}-app"
l_nfs_options: "*(rw,no_root_squash,no_wdelay)"
- name: Create the DB export
@@ -27,10 +27,10 @@
tasks_from: create_export
vars:
l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
- l_nfs_export_config: "{{ openshift_management_flavor_short }}"
- l_nfs_export_name: "{{ openshift_management_flavor_short }}-db"
+ l_nfs_export_config: "{{ __openshift_management_flavor_short }}"
+ l_nfs_export_name: "{{ __openshift_management_flavor_short }}-db"
l_nfs_options: "*(rw,no_root_squash,no_wdelay)"
when:
- - openshift_management_app_template in ['miq-template', 'cfme-template']
+ - not __openshift_management_use_ext_db
delegate_to: "{{ openshift_management_nfs_server }}"
diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml
index 9f97cdcb9..f40af7349 100644
--- a/roles/openshift_management/tasks/template.yml
+++ b/roles/openshift_management/tasks/template.yml
@@ -13,59 +13,59 @@
######################################################################
# STANDARD PODIFIED DATABASE TEMPLATE
-- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+- when: not __openshift_management_use_ext_db
block:
- name: Check if the Management Server template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
kind: template
- name: "{{ openshift_management_flavor }}"
+ name: "{{ __openshift_management_flavor }}"
register: miq_server_check
- when: miq_server_check.results.results == [{}]
block:
- name: Copy over Management Server template
copy:
- src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"
+ src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-template.yaml"
dest: "{{ template_dir }}/"
- name: Ensure Management Server Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
- name: "{{ openshift_management_flavor }}"
+ name: "{{ __openshift_management_flavor }}"
state: present
kind: template
files:
- - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml"
+ - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template.yaml"
######################################################################
# EXTERNAL DATABASE TEMPLATE
-- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
+- when: __openshift_management_use_ext_db
block:
- name: Check if the Management Ext-DB Server template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
kind: template
- name: "{{ openshift_management_flavor }}-ext-db"
+ name: "{{ __openshift_management_flavor }}-ext-db"
register: miq_ext_db_server_check
- when: miq_ext_db_server_check.results.results == [{}]
block:
- name: Copy over Management Ext-DB Server template
copy:
- src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml"
+ src: "templates/{{ __openshift_management_flavor }}/{{__openshift_management_flavor_short}}-template-ext-db.yaml"
dest: "{{ template_dir }}/"
- name: Ensure Management Ext-DB Server Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
- name: "{{ openshift_management_flavor }}-ext-db"
+ name: "{{ __openshift_management_flavor }}-ext-db"
state: present
kind: template
files:
- - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml"
+ - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template-ext-db.yaml"
# End app template creation.
######################################################################
@@ -79,50 +79,50 @@
namespace: "{{ openshift_management_project }}"
state: list
kind: template
- name: "{{ openshift_management_flavor }}-app-pv"
+ name: "{{ __openshift_management_flavor }}-app-pv"
register: miq_app_pv_check
- when: miq_app_pv_check.results.results == [{}]
block:
- name: Copy over Management App PV template
copy:
- src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
+ src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml"
dest: "{{ template_dir }}/"
- name: Ensure Management App PV Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
- name: "{{ openshift_management_flavor }}-app-pv"
+ name: "{{ __openshift_management_flavor }}-app-pv"
state: present
kind: template
files:
- - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
+ - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml"
#---------------------------------------------------------------------
# Required for database if the installation is fully podified
-- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+- when: not __openshift_management_use_ext_db
block:
- name: Check if the Management DB PV template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
kind: template
- name: "{{ openshift_management_flavor }}-db-pv"
+ name: "{{ __openshift_management_flavor }}-db-pv"
register: miq_db_pv_check
- when: miq_db_pv_check.results.results == [{}]
block:
- name: Copy over Management DB PV template
copy:
- src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
+ src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml"
dest: "{{ template_dir }}/"
- name: Ensure Management DB PV Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
- name: "{{ openshift_management_flavor }}-db-pv"
+ name: "{{ __openshift_management_flavor }}-db-pv"
state: present
kind: template
files:
- - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
+ - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml"
diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml
index b22f36a4f..2dc895190 100644
--- a/roles/openshift_management/tasks/validate.yml
+++ b/roles/openshift_management/tasks/validate.yml
@@ -100,4 +100,4 @@
'openshift_management_template_parameters'"
with_items: "{{ __openshift_management_required_db_conn_params }}"
when:
- - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
+ - __openshift_management_use_ext_db
diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml
index da3ad0af7..d7b18df3a 100644
--- a/roles/openshift_management/vars/main.yml
+++ b/roles/openshift_management/vars/main.yml
@@ -30,14 +30,18 @@ __openshift_management_db_parameters:
- DATABASE_PORT
- DATABASE_NAME
-# # Commented out until we can support both CFME and MIQ
-# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}"
-#openshift_management_flavor: cloudforms
-openshift_management_flavor: manageiq
-# TODO: Make this conditional as well based on the prior variable
-# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}"
-# openshift_management_flavor_short: cfme
-openshift_management_flavor_short: miq
+__openshift_management_flavors:
+ miq:
+ short: miq
+ long: manageiq
+ cfme:
+ short: cfme
+ long: cloudforms
+
+__openshift_management_flavor: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['long'] }}"
+__openshift_management_flavor_short: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['short'] }}"
+
+__openshift_management_use_ext_db: "{{ 'ext-db' in openshift_management_app_template }}"
######################################################################
# ACCOUNTING
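For reference, a hedged walk-through of how the new lookup resolves, using one of the four supported template names (the sample value is only illustrative):

    # Illustrative inventory value:
    #   openshift_management_app_template: cfme-template-ext-db
    # The split('-')[0] key is 'cfme', so the vars above resolve to:
    #   __openshift_management_flavor:       cloudforms
    #   __openshift_management_flavor_short: cfme
    #   __openshift_management_use_ext_db:   true
    # A value of 'miq-template' would instead yield manageiq / miq / false.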
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 8da74430f..293d8f451 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -54,7 +54,7 @@ openshift_metrics_master_url: https://kubernetes.default.svc
openshift_metrics_node_id: nodename
openshift_metrics_project: openshift-infra
-openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}"
+openshift_metrics_cassandra_pvc_prefix: metrics-cassandra
openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
openshift_metrics_hawkular_user_write_access: False
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0fe4c2035..9f887891b 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -137,6 +137,7 @@ default_r_openshift_node_image_prep_packages:
- yum-utils
# gluster
- glusterfs-fuse
+- device-mapper-multipath
# nfs
- nfs-utils
- flannel
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index a8048c42f..72415f9a6 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -1,6 +1,32 @@
---
- name: Install iSCSI storage plugin dependencies
- package: name=iscsi-initiator-utils state=present
+ package:
+ name: "{{ item }}"
+ state: present
when: not openshift_is_atomic | bool
register: result
until: result is succeeded
+ with_items:
+ - iscsi-initiator-utils
+ - device-mapper-multipath
+
+- name: Start and enable multipath and rpcbind services
+ systemd:
+ name: "{{ item }}"
+ state: started
+ enabled: True
+ with_items:
+ - multipathd
+ - rpcbind
+
+- name: Template multipath configuration
+ template:
+ dest: "/etc/multipath.conf"
+ src: multipath.conf.j2
+ backup: true
+ when: not openshift_is_atomic | bool
+
+#enable multipath
+- name: Enable multipath
+ command: "mpathconf --enable"
+ when: not openshift_is_atomic | bool
diff --git a/roles/openshift_node/templates/multipath.conf.j2 b/roles/openshift_node/templates/multipath.conf.j2
new file mode 100644
index 000000000..8a0abc2c1
--- /dev/null
+++ b/roles/openshift_node/templates/multipath.conf.j2
@@ -0,0 +1,15 @@
+# LIO iSCSI
+# TODO: Add env variables for tweaking
+devices {
+ device {
+ vendor "LIO-ORG"
+ user_friendly_names "yes"
+ path_grouping_policy "failover"
+ path_selector "round-robin 0"
+ failback immediate
+ path_checker "tur"
+ prio "const"
+ no_path_retry 120
+ rr_weight "uniform"
+ }
+}
diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
index 32c6b5838..9015c561f 100644
--- a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
+++ b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
@@ -1,4 +1,8 @@
+{% if docker_storage_mountpoints is defined %}
+DEVS="{{ docker_storage_mountpoints }}"
+{% else %}
DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}"
+{% endif %}
VG="{{ openshift_openstack_container_storage_setup.docker_vg }}"
DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}"
EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ openshift_openstack_container_storage_setup.docker_dm_basesize }}"
diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
index 1bf366bdc..917347073 100644
--- a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
+++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
@@ -1,4 +1,8 @@
+{% if docker_storage_mountpoints is defined %}
+DEVS="{{ docker_storage_mountpoints }}"
+{% else %}
DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}"
+{% endif %}
VG="{{ openshift_openstack_container_storage_setup.docker_vg }}"
DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}"
STORAGE_DRIVER=overlay2
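A minimal sketch of what the new branch does, assuming the OpenStack inventory exposes a docker_storage_mountpoints fact (the device path below is purely illustrative):

    # group_vars (illustrative):
    #   docker_storage_mountpoints: /dev/vdb
    # Both docker-storage-setup templates then render DEVS="/dev/vdb";
    # when the fact is undefined they fall back to
    # openshift_openstack_container_storage_setup.docker_dev as before.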
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 8e7c6288a..1d3173022 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -418,6 +418,10 @@ resources:
protocol: tcp
port_range_min: 443
port_range_max: 443
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 1936
+ port_range_max: 1936
cns-secgrp:
type: OS::Neutron::SecurityGroup
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index 29b09f3c9..9aeecfa74 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -261,11 +261,12 @@ resources:
properties:
size: { get_param: volume_size }
availability_zone: { get_param: availability_zone }
+ metadata:
+ purpose: openshift_docker_storage
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
volume_id: { get_resource: cinder_volume }
instance_uuid: { get_resource: server }
- mountpoint: /dev/sdb
{% endif %}
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 1b21c4739..37a05f3f0 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -7,9 +7,24 @@ openshift_prometheus_namespace: openshift-metrics
# defaults hosts for routes
openshift_prometheus_hostname: prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}
openshift_prometheus_alerts_hostname: alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}
+openshift_prometheus_alertmanager_hostname: alertmanager-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}
+
openshift_prometheus_node_selector: {"region":"infra"}
+openshift_prometheus_service_port: 443
+openshift_prometheus_service_targetport: 8443
+openshift_prometheus_service_name: prometheus
+openshift_prometheus_alerts_service_targetport: 9443
+openshift_prometheus_alerts_service_name: alerts
+openshift_prometheus_alertmanager_service_targetport: 10443
+openshift_prometheus_alertmanager_service_name: alertmanager
+openshift_prometheus_serviceaccount_annotations: []
+l_openshift_prometheus_serviceaccount_annotations:
+ - serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
+ - serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
+ - serviceaccounts.openshift.io/oauth-redirectreference.alertmanager='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}'
+
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
diff --git a/roles/openshift_prometheus/tasks/facts.yaml b/roles/openshift_prometheus/tasks/facts.yaml
new file mode 100644
index 000000000..214089732
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/facts.yaml
@@ -0,0 +1,10 @@
+---
+# The Kubernetes version impacts the prometheus scraping endpoint,
+# so gather it before constructing the configmap
+- name: get oc version
+ oc_version:
+ register: oc_version
+
+- set_fact:
+ kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}"
+ openshift_prometheus_serviceaccount_annotations: "{{ l_openshift_prometheus_serviceaccount_annotations + openshift_prometheus_serviceaccount_annotations|list }}"
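A short sketch of how the annotation merge behaves, assuming a user supplies one extra annotation in the inventory (the extra key below is hypothetical):

    # group_vars (hypothetical user addition):
    openshift_prometheus_serviceaccount_annotations:
      - example.io/owner='metrics-team'
    # After the set_fact above, the annotate loop applies the three default
    # oauth-redirectreference annotations from
    # l_openshift_prometheus_serviceaccount_annotations plus this user entry.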
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 749df5152..0b565502f 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -1,4 +1,6 @@
---
+# set facts
+- include_tasks: facts.yaml
# namespace
- name: Add prometheus project
@@ -9,7 +11,7 @@
description: Prometheus
# secrets
-- name: Set alert and prometheus secrets
+- name: Set alert, alertmanager and prometheus secrets
oc_secret:
state: present
name: "{{ item }}-proxy"
@@ -20,30 +22,24 @@
with_items:
- prometheus
- alerts
+ - alertmanager
# serviceaccount
- name: create prometheus serviceaccount
oc_serviceaccount:
state: present
- name: prometheus
+ name: "{{ openshift_prometheus_service_name }}"
namespace: "{{ openshift_prometheus_namespace }}"
- # TODO add annotations when supproted
- # annotations:
- # serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
- # serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
-
- secrets:
- - prometheus-secrets
changed_when: no
+
# TODO remove this when annotations are supported by oc_serviceaccount
- name: annotate serviceaccount
command: >
{{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
- serviceaccount prometheus
- serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
- serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
-
+ serviceaccount {{ openshift_prometheus_service_name }} {{ item }}
+ with_items:
+ "{{ openshift_prometheus_serviceaccount_annotations }}"
# create clusterrolebinding for prometheus serviceaccount
- name: Set cluster-reader permissions for prometheus
@@ -52,63 +48,61 @@
namespace: "{{ openshift_prometheus_namespace }}"
resource_kind: cluster-role
resource_name: cluster-reader
- user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"
+ user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:{{ openshift_prometheus_service_name }}"
+
-# create prometheus and alerts services
-# TODO join into 1 task with loop
-- name: Create prometheus service
+- name: create services for prometheus
oc_service:
- state: present
- name: "{{ item.name }}"
+ name: "{{ openshift_prometheus_service_name }}"
namespace: "{{ openshift_prometheus_namespace }}"
- selector:
- app: prometheus
labels:
- name: "{{ item.name }}"
- # TODO add annotations when supported
- # annotations:
- # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ name: prometheus
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/scheme: https
+ service.alpha.openshift.io/serving-cert-secret-name: prometheus-tls
ports:
- - port: 443
- targetPort: 8443
- with_items:
- - name: prometheus
+ - name: prometheus
+ port: "{{ openshift_prometheus_service_port }}"
+ targetPort: "{{ openshift_prometheus_service_targetport }}"
+ protocol: TCP
+ selector:
+ app: prometheus
-- name: Create alerts service
+- name: create services for alert buffer
oc_service:
- state: present
- name: "{{ item.name }}"
+ name: "{{ openshift_prometheus_alerts_service_name }}"
namespace: "{{ openshift_prometheus_namespace }}"
+ labels:
+ name: prometheus
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: alerts-tls
+ ports:
+ - name: prometheus
+ port: "{{ openshift_prometheus_service_port }}"
+ targetPort: "{{ openshift_prometheus_alerts_service_targetport }}"
+ protocol: TCP
selector:
app: prometheus
+
+- name: create services for alertmanager
+ oc_service:
+ name: "{{ openshift_prometheus_alertmanager_service_name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
labels:
- name: "{{ item.name }}"
- # TODO add annotations when supported
- # annotations:
- # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ name: prometheus
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: alertmanager-tls
ports:
- - port: 443
- targetPort: 9443
- with_items:
- - name: alerts
-
-
-# Annotate services with secret name
-# TODO remove this when annotations are supported by oc_service
-- name: annotate prometheus service
- command: >
- {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
- service prometheus
- prometheus.io/scrape='true'
- prometheus.io/scheme=https
- service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls
-
-- name: annotate alerts service
- command: >
- {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
- service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls'
+ - name: prometheus
+ port: "{{ openshift_prometheus_service_port }}"
+ targetPort: "{{ openshift_prometheus_alertmanager_service_targetport }}"
+ protocol: TCP
+ selector:
+ app: prometheus
# create prometheus and alerts routes
+# TODO: oc_route module should support insecureEdgeTerminationPolicy: Redirect
- name: create prometheus and alerts routes
oc_route:
state: present
@@ -122,6 +116,8 @@
host: "{{ openshift_prometheus_hostname }}"
- name: alerts
host: "{{ openshift_prometheus_alerts_hostname }}"
+ - name: alertmanager
+ host: "{{ openshift_prometheus_alertmanager_hostname }}"
# Storage
- name: create prometheus pvc
@@ -169,15 +165,6 @@
path: "{{ tempdir }}/prometheus.additional.rules"
register: additional_rules_stat
-# The kubernetes version impacts the prometheus scraping endpoint
-# so gathering it before constructing the configmap
-- name: get oc version
- oc_version:
- register: oc_version
-
-- set_fact:
- kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}"
-
- template:
src: prometheus.yml.j2
dest: "{{ tempdir }}/prometheus.yml"
@@ -219,7 +206,7 @@
- name: Set alertmanager configmap
oc_configmap:
state: present
- name: "prometheus-alerts"
+ name: "alertmanager"
namespace: "{{ openshift_prometheus_namespace }}"
from_file:
alertmanager.yml: "{{ tempdir }}/alertmanager.yml"
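Roughly, the prometheus oc_service task above should end up creating a Service along these lines with the role defaults (a sketch, abbreviated; field names follow the Kubernetes Service schema):

    apiVersion: v1
    kind: Service
    metadata:
      name: prometheus
      namespace: openshift-metrics
      labels:
        name: prometheus
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/scheme: https
        service.alpha.openshift.io/serving-cert-secret-name: prometheus-tls
    spec:
      ports:
      - name: prometheus
        port: 443
        targetPort: 8443
        protocol: TCP
      selector:
        app: prometheus

The alerts and alertmanager services follow the same shape on target ports 9443 and 10443 respectively.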
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
index b859eb111..66d65a3f2 100644
--- a/roles/openshift_prometheus/tasks/main.yaml
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -16,9 +16,11 @@
- name: Create templates subdirectory
file:
state: directory
- path: "{{ tempdir }}/templates"
+ path: "{{ tempdir }}/{{ item }}"
mode: 0755
changed_when: False
+ with_items:
+ - templates
- include_tasks: install_prometheus.yaml
when: openshift_prometheus_state == 'present'
diff --git a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml b/roles/openshift_prometheus/tasks/uninstall.yaml
index d746402db..d746402db 100644
--- a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/uninstall.yaml
diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2
index d780550b8..c0abd483b 100644
--- a/roles/openshift_prometheus/templates/prometheus.j2
+++ b/roles/openshift_prometheus/templates/prometheus.j2
@@ -19,7 +19,7 @@ spec:
labels:
app: prometheus
spec:
- serviceAccountName: prometheus
+ serviceAccountName: "{{ openshift_prometheus_service_name }}"
{% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %}
nodeSelector:
{% for key, value in openshift_prometheus_node_selector.items() %}
@@ -47,15 +47,15 @@ spec:
cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}"
{% endif %}
ports:
- - containerPort: 8443
+ - containerPort: {{ openshift_prometheus_service_targetport }}
name: web
args:
- -provider=openshift
- - -https-address=:8443
+ - -https-address=:{{ openshift_prometheus_service_targetport }}
- -http-address=
- -email-domain=*
- -upstream=http://localhost:9090
- - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }}
- '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
- '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
- -tls-cert=/etc/tls/private/tls.crt
@@ -67,9 +67,9 @@ spec:
- -skip-auth-regex=^/metrics
volumeMounts:
- mountPath: /etc/tls/private
- name: prometheus-tls
+ name: prometheus-tls-secret
- mountPath: /etc/proxy/secrets
- name: prometheus-secrets
+ name: prometheus-proxy-secret
- mountPath: /prometheus
name: prometheus-data
@@ -104,7 +104,7 @@ spec:
- mountPath: /prometheus
name: prometheus-data
- # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
+ # Deploy alert-buffer behind oauth alerts-proxy
- name: alerts-proxy
image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}"
imagePullPolicy: IfNotPresent
@@ -124,15 +124,15 @@ spec:
cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}"
{% endif %}
ports:
- - containerPort: 9443
+ - containerPort: {{ openshift_prometheus_alerts_service_targetport }}
name: web
args:
- -provider=openshift
- - -https-address=:9443
+ - -https-address=:{{ openshift_prometheus_alerts_service_targetport }}
- -http-address=
- -email-domain=*
- -upstream=http://localhost:9099
- - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }}
- '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
- '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
- -tls-cert=/etc/tls/private/tls.crt
@@ -143,9 +143,9 @@ spec:
- -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
volumeMounts:
- mountPath: /etc/tls/private
- name: alerts-tls
+ name: alerts-tls-secret
- mountPath: /etc/proxy/secrets
- name: alerts-secrets
+ name: alerts-proxy-secret
- name: alert-buffer
args:
@@ -169,11 +169,54 @@ spec:
{% endif %}
volumeMounts:
- mountPath: /alert-buffer
- name: alert-buffer-data
+ name: alerts-data
ports:
- containerPort: 9099
name: alert-buf
+ # Deploy alertmanager behind oauth alertmanager-proxy
+ - name: alertmanager-proxy
+ image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}"
+ imagePullPolicy: IfNotPresent
+ requests:
+{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
+ memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
+ cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+ memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
+ cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}"
+{% endif %}
+ ports:
+ - containerPort: {{ openshift_prometheus_alertmanager_service_targetport }}
+ name: web
+ args:
+ - -provider=openshift
+ - -https-address=:{{ openshift_prometheus_alertmanager_service_targetport }}
+ - -http-address=
+ - -email-domain=*
+ - -upstream=http://localhost:9093
+ - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }}
+ - -openshift-ca=/etc/pki/tls/cert.pem
+ - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
+ - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -skip-auth-regex=^/metrics
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: alertmanager-tls-secret
+ - mountPath: /etc/proxy/secrets
+ name: alertmanager-proxy-secret
+
- name: alertmanager
args:
- -config.file=/etc/alertmanager/alertmanager.yml
@@ -205,14 +248,15 @@ spec:
restartPolicy: Always
volumes:
+
- name: prometheus-config
configMap:
defaultMode: 420
name: prometheus
- - name: prometheus-secrets
+ - name: prometheus-proxy-secret
secret:
secretName: prometheus-proxy
- - name: prometheus-tls
+ - name: prometheus-tls-secret
secret:
secretName: prometheus-tls
- name: prometheus-data
@@ -225,13 +269,19 @@ spec:
- name: alertmanager-config
configMap:
defaultMode: 420
- name: prometheus-alerts
- - name: alerts-secrets
+ name: alertmanager
+ - name: alertmanager-proxy-secret
secret:
- secretName: alerts-proxy
- - name: alerts-tls
+ secretName: alertmanager-proxy
+ - name: alertmanager-tls-secret
+ secret:
+ secretName: alertmanager-tls
+ - name: alerts-tls-secret
secret:
- secretName: prometheus-alerts-tls
+ secretName: alerts-tls
+ - name: alerts-proxy-secret
+ secret:
+ secretName: alerts-proxy
- name: alertmanager-data
{% if openshift_prometheus_alertmanager_storage_type == 'pvc' %}
persistentVolumeClaim:
@@ -239,7 +289,7 @@ spec:
{% else %}
emptydir: {}
{% endif %}
- - name: alert-buffer-data
+ - name: alerts-data
{% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %}
persistentVolumeClaim:
claimName: {{ openshift_prometheus_alertbuffer_pvc_name }}
diff --git a/roles/openshift_prometheus/templates/prometheus.yml.j2 b/roles/openshift_prometheus/templates/prometheus.yml.j2
index 63430f834..005c2c564 100644
--- a/roles/openshift_prometheus/templates/prometheus.yml.j2
+++ b/roles/openshift_prometheus/templates/prometheus.yml.j2
@@ -1,10 +1,5 @@
rule_files:
- - 'prometheus.rules'
-{% if openshift_prometheus_additional_rules_file is defined and openshift_prometheus_additional_rules_file is not none %}
- - 'prometheus.additional.rules'
-{% endif %}
-
-
+ - '*.rules'
# A scrape configuration for running Prometheus on a Kubernetes cluster.
# This uses separate scrape configs for cluster components (i.e. API server, node)
@@ -39,31 +34,11 @@ scrape_configs:
action: keep
regex: default;kubernetes;https
-# Scrape config for nodes.
-#
-# Each node exposes a /metrics endpoint that contains operational metrics for
-# the Kubelet and other components.
-- job_name: 'kubernetes-nodes'
-
- scheme: https
- tls_config:
- ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
- kubernetes_sd_configs:
- - role: node
-
- relabel_configs:
- - action: labelmap
- regex: __meta_kubernetes_node_label_(.+)
-
# Scrape config for controllers.
#
# Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for
# the controllers.
#
-# TODO: move this to a pure endpoints based metrics gatherer when controllers are exposed via
-# endpoints.
- job_name: 'kubernetes-controllers'
scheme: https
@@ -87,6 +62,27 @@ scrape_configs:
regex: (.+)(?::\d+)
replacement: $1:8444
+# Scrape config for nodes.
+#
+# Each node exposes a /metrics endpoint that contains operational metrics for
+# the Kubelet and other components.
+- job_name: 'kubernetes-nodes'
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+ kubernetes_sd_configs:
+ - role: node
+ # Drop a very high cardinality metric that is incorrect in 3.7. It will be
+ # fixed in 3.9.
+ metric_relabel_configs:
+ - source_labels: [__name__]
+ action: drop
+ regex: 'openshift_sdn_pod_(setup|teardown)_latency(.*)'
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+
# Scrape config for cAdvisor.
#
# Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that
@@ -107,6 +103,14 @@ scrape_configs:
kubernetes_sd_configs:
- role: node
+ # Exclude a set of high cardinality metrics that can contribute to significant
+ # memory use in large clusters. These can be selectively enabled as necessary
+ # for medium or small clusters.
+ metric_relabel_configs:
+ - source_labels: [__name__]
+ action: drop
+ regex: 'container_(cpu_user_seconds_total|cpu_cfs_periods_total|memory_usage_bytes|memory_swap|memory_working_set_bytes|memory_cache|last_seen|fs_(read_seconds_total|write_seconds_total|sector_(.*)|io_(.*)|reads_merged_total|writes_merged_total)|tasks_state|memory_failcnt|memory_failures_total|spec_memory_swap_limit_bytes|fs_(.*)_bytes_total|spec_(.*))'
+
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
@@ -133,38 +137,101 @@ scrape_configs:
- role: endpoints
relabel_configs:
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
- action: keep
- regex: true
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
- action: replace
- target_label: __scheme__
- regex: (https?)
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+ # only scrape infrastructure components
+ - source_labels: [__meta_kubernetes_namespace]
+ action: keep
+ regex: 'default|logging|metrics|kube-.+|openshift|openshift-.+'
+ # drop infrastructure components managed by other scrape targets
+ - source_labels: [__meta_kubernetes_service_name]
+ action: drop
+ regex: 'prometheus-node-exporter'
+ # only those that have requested scraping
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+ action: keep
+ regex: true
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+ action: replace
+ target_label: __scheme__
+ regex: (https?)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+ action: replace
+ target_label: __metrics_path__
+ regex: (.+)
+ - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+ action: replace
+ target_label: __address__
+ regex: (.+)(?::\d+);(\d+)
+ replacement: $1:$2
+ - action: labelmap
+ regex: __meta_kubernetes_service_label_(.+)
+ - source_labels: [__meta_kubernetes_namespace]
+ action: replace
+ target_label: kubernetes_namespace
+ - source_labels: [__meta_kubernetes_service_name]
+ action: replace
+ target_label: kubernetes_name
+
+# Scrape config for node-exporter, which is expected to be running on port 9100.
+- job_name: 'kubernetes-nodes-exporter'
+
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ kubernetes_sd_configs:
+ - role: node
+
+ metric_relabel_configs:
+ - source_labels: [__name__]
+ action: drop
+ regex: 'node_cpu|node_(disk|scrape_collector)_.+'
+ # preserve a subset of the network, netstat, vmstat, and filesystem series
+ - source_labels: [__name__]
action: replace
- target_label: __metrics_path__
- regex: (.+)
- - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+ regex: '(node_(netstat_Ip_.+|vmstat_(nr|thp)_.+|filesystem_(free|size|device_error)|network_(transmit|receive)_(drop|errs)))'
+ target_label: __name__
+ replacement: renamed_$1
+ - source_labels: [__name__]
+ action: drop
+ regex: 'node_(netstat|vmstat|filesystem|network)_.+'
+ - source_labels: [__name__]
action: replace
+ regex: 'renamed_(.+)'
+ target_label: __name__
+ replacement: $1
+ # drop any partial expensive series
+ - source_labels: [__name__, device]
+ action: drop
+ regex: 'node_network_.+;veth.+'
+ - source_labels: [__name__, mountpoint]
+ action: drop
+ regex: 'node_filesystem_(free|size|device_error);([^/].*|/.+)'
+
+ relabel_configs:
+ - source_labels: [__address__]
+ regex: '(.*):10250'
+ replacement: '${1}:9100'
target_label: __address__
- regex: (.+)(?::\d+);(\d+)
- replacement: $1:$2
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_username]
- action: replace
- target_label: __basic_auth_username__
- regex: (.+)
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_password]
- action: replace
- target_label: __basic_auth_password__
- regex: (.+)
+ - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname]
+ target_label: __instance__
- action: labelmap
- regex: __meta_kubernetes_service_label_(.+)
- - source_labels: [__meta_kubernetes_namespace]
- action: replace
- target_label: kubernetes_namespace
- - source_labels: [__meta_kubernetes_service_name]
- action: replace
- target_label: kubernetes_name
+ regex: __meta_kubernetes_node_label_(.+)
+
+# Scrape config for the template service broker
+- job_name: 'openshift-template-service-broker'
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+ server_name: apiserver.openshift-template-service-broker.svc
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: openshift-template-service-broker;apiserver;https
+
alerting:
alertmanagers:
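To make the node-exporter keep-list above concrete, a hedged walk-through for two series (metric names taken from the regexes in the job):

    # 1. node_filesystem_free matches the preserve regex and is copied to
    #    renamed_node_filesystem_free.
    # 2. The following drop rule removes every remaining
    #    node_(netstat|vmstat|filesystem|network)_.+ series, including the
    #    original node_filesystem_free.
    # 3. The final replace strips the renamed_ prefix, so node_filesystem_free
    #    survives, while a series such as node_filesystem_files never gets a
    #    renamed_ copy and is dropped in step 2.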
diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml
index a6f040831..34ba78404 100644
--- a/roles/openshift_provisioners/defaults/main.yaml
+++ b/roles/openshift_provisioners/defaults/main.yaml
@@ -1,7 +1,5 @@
---
openshift_provisioners_install_provisioners: True
-openshift_provisioners_image_prefix: docker.io/openshift/origin-
-openshift_provisioners_image_version: latest
openshift_provisioners_efs: False
openshift_provisioners_efs_path: /persistentvolumes
@@ -10,3 +8,11 @@ openshift_provisioners_efs_nodeselector: ""
openshift_provisioners_efs_supplementalgroup: '65534'
openshift_provisioners_project: openshift-infra
+
+openshift_provisioners_image_prefix_dict:
+ origin: "docker.io/openshift/origin-"
+ openshift-enterprise: "registry.access.redhat.com/openshift3/ose-"
+
+openshift_provisioners_image_version_dict:
+ origin: "latest"
+ openshift-enterprise: "{{ openshift_image_tag }}"
diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml
index 4ba26b2b8..d00573b07 100644
--- a/roles/openshift_provisioners/tasks/main.yaml
+++ b/roles/openshift_provisioners/tasks/main.yaml
@@ -12,6 +12,11 @@
check_mode: no
tags: provisioners_init
+- name: Set eventrouter image facts
+ set_fact:
+ openshift_provisioners_image_prefix: "{{ openshift_provisioners_image_prefix | default(openshift_provisioners_image_prefix_dict[openshift_deployment_type]) }}"
+ openshift_provisioners_image_version: "{{ openshift_provisioners_image_version | default(openshift_provisioners_image_version_dict[openshift_deployment_type]) }}"
+
- include_tasks: install_provisioners.yaml
when: openshift_provisioners_install_provisioners | default(false) | bool
diff --git a/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml
new file mode 100644
index 000000000..28abcbcfc
--- /dev/null
+++ b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml
@@ -0,0 +1,86 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ labels:
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ name: system:service-catalog:aggregate-to-admin
+rules:
+- apiGroups:
+ - "servicecatalog.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - serviceinstances
+ - servicebindings
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+ - patch
+- apiGroups:
+ - "settings.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - podpresets
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ labels:
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ name: system:service-catalog:aggregate-to-edit
+rules:
+- apiGroups:
+ - "servicecatalog.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - serviceinstances
+ - servicebindings
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+ - patch
+- apiGroups:
+ - "settings.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - podpresets
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ labels:
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ name: system:service-catalog:aggregate-to-view
+rules:
+- apiGroups:
+ - "servicecatalog.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - serviceinstances
+ - servicebindings
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 9b38a85c4..4d06c1872 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -74,74 +74,17 @@
template_name: kube-system-service-catalog-role-bindings
namespace: kube-system
-- oc_obj:
- name: edit
- kind: clusterrole
- state: list
- register: edit_yaml
-
-# only do this if we don't already have the updated role info
-- name: Generate apply template for clusterrole/edit
- template:
- src: sc_admin_edit_role_patching.j2
- dest: "{{ mktemp.stdout }}/edit_sc_patch.yml"
- vars:
- original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
- when:
- - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
-
-# only do this if we don't already have the updated role info
-- name: update edit role for service catalog and pod preset access
- command: >
- {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
- when:
- - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
-
-- oc_obj:
- name: admin
- kind: clusterrole
- state: list
- register: admin_yaml
-
-# only do this if we don't already have the updated role info
-- name: Generate apply template for clusterrole/admin
- template:
- src: sc_admin_edit_role_patching.j2
- dest: "{{ mktemp.stdout }}/admin_sc_patch.yml"
- vars:
- original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
- when:
- - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
-
-# only do this if we don't already have the updated role info
-- name: update admin role for service catalog and pod preset access
- command: >
- {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
- when:
- - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
-
-- oc_obj:
- name: view
- kind: clusterrole
- state: list
- register: view_yaml
-
-# only do this if we don't already have the updated role info
-- name: Generate apply template for clusterrole/view
- template:
- src: sc_view_role_patching.j2
- dest: "{{ mktemp.stdout }}/view_sc_patch.yml"
- vars:
- original_content: "{{ view_yaml.results.results[0] | to_yaml }}"
- when:
- - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
-
-# only do this if we don't already have the updated role info
-- name: update view role for service catalog access
- command: >
- {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml
- when:
- - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
+- copy:
+ src: openshift_catalog_clusterroles.yml
+ dest: "{{ mktemp.stdout }}/openshift_catalog_clusterroles.yml"
+
+- name: Apply Service Catalog cluster roles
+ retries: 5
+ delay: 2
+ register: task_result
+ until: task_result.rc == 0
+ shell: >
+ {{ openshift_client_binary }} auth reconcile --config={{ openshift.common.config_base }}/master/admin.kubeconfig -f {{ mktemp.stdout }}/openshift_catalog_clusterroles.yml
- oc_adm_policy_user:
namespace: kube-service-catalog
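The new flow leans on RBAC aggregation instead of patching roles in place: ClusterRoles labelled rbac.authorization.k8s.io/aggregate-to-admin/-edit/-view are intended to have their rules folded into the built-in admin, edit and view roles by the controller manager, and oc auth reconcile keeps the file-defined roles converged across reruns. A rough post-run check, shown only as an illustration and not part of the playbook:

    # oc get clusterrole admin -o yaml | grep -A 3 servicecatalog.k8s.io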
diff --git a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
deleted file mode 100644
index 59cceafcf..000000000
--- a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-{{ original_content }}
-- apiGroups:
- - "servicecatalog.k8s.io"
- attributeRestrictions: null
- resources:
- - serviceinstances
- - servicebindings
- verbs:
- - create
- - update
- - delete
- - get
- - list
- - watch
- - patch
-- apiGroups:
- - "settings.k8s.io"
- attributeRestrictions: null
- resources:
- - podpresets
- verbs:
- - create
- - update
- - delete
- - get
- - list
- - watch
diff --git a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2
deleted file mode 100644
index 838993854..000000000
--- a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{{ original_content }}
-- apiGroups:
- - "servicecatalog.k8s.io"
- attributeRestrictions: null
- resources:
- - serviceinstances
- - servicebindings
- verbs:
- - get
- - list
- - watch
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index f7bd58db3..70a89b0ba 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -73,49 +73,51 @@ Role Variables
This role has the following variables that control the integration of a
GlusterFS cluster into a new or existing OpenShift cluster:
-| Name | Default value | Description |
-|--------------------------------------------------|-------------------------|-----------------------------------------|
-| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
-| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
-| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
-| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
-| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
-| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
-| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
-| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
-| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
-| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
-| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
-| openshift_storage_glusterfs_block_host_vol_create| True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
-| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
-| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
-| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
-| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
-| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster=s3 pod
-| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
-| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
-| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
-| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
-| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
-| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
-| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
-| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
-| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
-| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
-| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
-| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
-| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
+| Name | Default value | Description |
+|--------------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
+| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
+| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
+| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
+| openshift_storage_glusterfs_block_host_vol_create | True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
+| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
+| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_block_storageclass | False | Automatically create a StorageClass for each Gluster Block cluster
+| openshift_storage_glusterfs_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
+| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
+| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod
+| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
+| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
+| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
+| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
+| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
+| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
+| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
+| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
| openshift_storage_glusterfs_heketi_fstab | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
@@ -126,14 +128,16 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | Description |
-|-----------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
+| Name | Default value | Description |
+|-----------------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_registry_block_storageclass | False | It is recommended to not create a StorageClass for Gluster Block clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
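
To make the new registry block variables concrete, a hedged inventory sketch follows; it uses only variables from the table above, and the values chosen are illustrative rather than a recommendation beyond what the descriptions already state.

```yaml
# Illustrative only: keep the file-based registry StorageClass disabled (the
# documented recommendation) while creating a non-default StorageClass for the
# registry's gluster-block cluster.
openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_block_storageclass: True
openshift_storage_glusterfs_registry_block_storageclass_default: False
```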
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 4cbe262d2..7e751cc7a 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -14,6 +14,8 @@ openshift_storage_glusterfs_block_version: 'latest'
openshift_storage_glusterfs_block_host_vol_create: True
openshift_storage_glusterfs_block_host_vol_size: 100
openshift_storage_glusterfs_block_host_vol_max: 15
+openshift_storage_glusterfs_block_storageclass: False
+openshift_storage_glusterfs_block_storageclass_default: False
openshift_storage_glusterfs_s3_deploy: True
openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
openshift_storage_glusterfs_s3_version: 'latest'
@@ -61,6 +63,8 @@ openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_gluste
openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+openshift_storage_glusterfs_registry_block_storageclass: False
+openshift_storage_glusterfs_registry_block_storageclass_default: False
openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
@@ -103,3 +107,9 @@ r_openshift_storage_glusterfs_os_firewall_allow:
port: "24008/tcp"
- service: glusterfs_bricks
port: "49152-49251/tcp"
+- service: glusterblockd
+ port: "24010/tcp"
+- service: iscsi-targets
+ port: "3260/tcp"
+- service: rpcbind
+ port: "111/tcp"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 001578406..a5fdae803 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -315,5 +315,31 @@
- include_tasks: glusterblock_deploy.yml
when: glusterfs_block_deploy
+- block:
+ - name: Create heketi block secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-admin-secret-block"
+ type: "gluster.org/glusterblock"
+ force: True
+ contents:
+ - path: key
+ data: "{{ glusterfs_heketi_admin_key }}"
+ when: glusterfs_heketi_admin_key is defined
+ - name: Generate Gluster Block StorageClass file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/gluster-block-storageclass.yml.j2"
+ dest: "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+
+ - name: Create Gluster Block StorageClass
+ oc_obj:
+ state: present
+ kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}-block"
+ files:
+ - "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+ when: glusterfs_block_storageclass
+
- include_tasks: gluster_s3_deploy.yml
when: glusterfs_s3_deploy
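
For orientation, a claim that consumes the StorageClass created by the block above could look like the sketch below; it assumes the role's default cluster name of 'storage' (so the class is named glusterfs-storage-block), and the requested size is arbitrary.

```yaml
# Illustrative PVC, not part of this change. Assumes glusterfs_name == 'storage',
# so the StorageClass created above is "glusterfs-storage-block".
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-block-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: glusterfs-storage-block
```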
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index a374df0ce..92de1b64d 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -17,6 +17,8 @@
glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+ glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_block_storageclass | bool }}"
+ glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_block_storageclass_default | bool }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 544a6f491..befacb04f 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -17,6 +17,8 @@
glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}"
glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}"
glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}"
+ glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_registry_block_storageclass | bool }}"
+ glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_registry_block_storageclass_default | bool }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
index dd4d6e6f7..bcc02e217 100644
--- a/roles/openshift_storage_glusterfs/templates/glusterfs.conf
+++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
@@ -1,4 +1,7 @@
#{{ ansible_managed }}
dm_thin_pool
dm_snapshot
-dm_mirror
\ No newline at end of file
+dm_mirror
+#glusterblock
+dm_multipath
+target_core_user
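
The two added entries are the kernel modules gluster-block relies on (device-mapper multipath and the TCM userspace backstore). As a rough, assumed sketch for checking a node by hand, the same list could be loaded with Ansible's modprobe module; this task list is not part of the role.

```yaml
# Illustrative ad-hoc tasks (not part of this change): load the modules named
# in glusterfs.conf, including the new gluster-block prerequisites.
- name: Load kernel modules required for GlusterFS and gluster-block
  modprobe:
    name: "{{ item }}"
    state: present
  with_items:
    - dm_thin_pool
    - dm_snapshot
    - dm_mirror
    - dm_multipath
    - target_core_user
```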
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
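
Rendered with illustrative values (cluster name 'storage', native heketi behind a hypothetical route, an admin key defined, and namespace 'glusterfs'), the template above would produce roughly the following object.

```yaml
# Illustrative rendering only; the route hostname is a placeholder.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-storage-block
provisioner: gluster.org/glusterblock
parameters:
  resturl: "http://heketi-storage-glusterfs.apps.example.com"
  restuser: "admin"
  chapauthenabled: "true"
  hacount: "3"
  restsecretnamespace: "glusterfs"
  restsecretname: "heketi-storage-admin-secret-block"
```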
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index e2e6538c9..513dff045 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -10,3 +10,4 @@ openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_typ
openshift_use_crio_only: False
l_first_master_version_task_file: "{{ openshift_is_containerized | ternary('first_master_containerized_version.yml', 'first_master_rpm_version.yml') }}"
+l_force_image_tag_to_version: False
diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml
index 374725086..e01a56dc1 100644
--- a/roles/openshift_version/tasks/first_master.yml
+++ b/roles/openshift_version/tasks/first_master.yml
@@ -24,7 +24,9 @@
- block:
- debug:
- msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
+ msg: "openshift_image_tag set to v{{ openshift_version }}"
- set_fact:
openshift_image_tag: v{{ openshift_version }}
- when: openshift_image_tag is not defined
+ when: >
+ openshift_image_tag is not defined
+ or l_force_image_tag_to_version | bool
diff --git a/files/origin-components/console-config.yaml b/roles/openshift_web_console/files/console-config.yaml
index 55c650fbe..55c650fbe 100644
--- a/files/origin-components/console-config.yaml
+++ b/roles/openshift_web_console/files/console-config.yaml
diff --git a/files/origin-components/console-rbac-template.yaml b/roles/openshift_web_console/files/console-rbac-template.yaml
index 9ee117199..9ee117199 100644
--- a/files/origin-components/console-rbac-template.yaml
+++ b/roles/openshift_web_console/files/console-rbac-template.yaml
diff --git a/files/origin-components/console-template.yaml b/roles/openshift_web_console/files/console-template.yaml
index 547e7a265..547e7a265 100644
--- a/files/origin-components/console-template.yaml
+++ b/roles/openshift_web_console/files/console-template.yaml
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index cc5eef47d..f79a05c94 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -33,7 +33,7 @@
- name: Copy web console templates to temp directory
copy:
- src: "{{ __console_files_location }}/{{ item }}"
+ src: "{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- "{{ __console_template_file }}"
@@ -71,6 +71,9 @@
- set_fact:
config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}"
+ - set_fact:
+ cro_plugin_enabled: "{{ config_to_migrate.admissionConfig is defined and config_to_migrate.admissionConfig.pluginConfig is defined and config_to_migrate.admissionConfig.pluginConfig.ClusterResourceOverrides is defined }}"
+
# Update properties in the config template based on inventory vars when the
# asset config does not exist.
- name: Set web console config properties from inventory variables
@@ -87,7 +90,7 @@
- key: features#inactivityTimeoutMinutes
value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
- key: features#clusterResourceOverridesEnabled
- value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(false) }}"
+ value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
- key: extensions#scriptURLs
value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
- key: extensions#stylesheetURLs
@@ -116,6 +119,8 @@
value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}"
- key: servingInfo#requestTimeoutSeconds
value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}"
+ - key: features#clusterResourceOverridesEnabled
+ value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}"
separator: '#'
state: present
when: config_to_migrate.assetConfig is defined
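
For context, the new cro_plugin_enabled fact only tests whether a ClusterResourceOverrides key is present under admissionConfig.pluginConfig in the fetched master config; a master-config.yaml fragment along the following lines (inner values illustrative) would therefore make the console's clusterResourceOverridesEnabled default to true.

```yaml
# Illustrative master-config.yaml fragment, not part of this change. Only the
# presence of the ClusterResourceOverrides key matters to cro_plugin_enabled.
admissionConfig:
  pluginConfig:
    ClusterResourceOverrides:
      configuration:
        apiVersion: v1
        kind: ClusterResourceOverrideConfig
        memoryRequestToLimitPercent: 25
```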
diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml
index e91048e38..72bff5d01 100644
--- a/roles/openshift_web_console/vars/main.yml
+++ b/roles/openshift_web_console/vars/main.yml
@@ -1,6 +1,4 @@
---
-__console_files_location: "../../../files/origin-components/"
-
__console_template_file: "console-template.yaml"
__console_rbac_file: "console-rbac-template.yaml"
__console_config_file: "console-config.yaml"
diff --git a/files/origin-components/apiserver-config.yaml b/roles/template_service_broker/files/apiserver-config.yaml
index e4048d1da..e4048d1da 100644
--- a/files/origin-components/apiserver-config.yaml
+++ b/roles/template_service_broker/files/apiserver-config.yaml
diff --git a/files/origin-components/apiserver-template.yaml b/roles/template_service_broker/files/apiserver-template.yaml
index 4dd9395d0..4dd9395d0 100644
--- a/files/origin-components/apiserver-template.yaml
+++ b/roles/template_service_broker/files/apiserver-template.yaml
diff --git a/files/origin-components/rbac-template.yaml b/roles/template_service_broker/files/rbac-template.yaml
index 0937a9065..0937a9065 100644
--- a/files/origin-components/rbac-template.yaml
+++ b/roles/template_service_broker/files/rbac-template.yaml
diff --git a/files/origin-components/template-service-broker-registration.yaml b/roles/template_service_broker/files/template-service-broker-registration.yaml
index 95fb72924..95fb72924 100644
--- a/files/origin-components/template-service-broker-registration.yaml
+++ b/roles/template_service_broker/files/template-service-broker-registration.yaml
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 4e6ad2ae5..d0a07c48d 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -28,7 +28,7 @@
changed_when: false
- copy:
- src: "{{ __tsb_files_location }}/{{ item }}"
+ src: "{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- "{{ __tsb_template_file }}"
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index 48dc1327e..b46dd4771 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -9,7 +9,7 @@
changed_when: false
- copy:
- src: "{{ __tsb_files_location }}/{{ item }}"
+ src: "{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- "{{ __tsb_template_file }}"
diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml
index a65340f16..7dec24a79 100644
--- a/roles/template_service_broker/vars/main.yml
+++ b/roles/template_service_broker/vars/main.yml
@@ -1,6 +1,4 @@
---
-__tsb_files_location: "../../../files/origin-components/"
-
__tsb_template_file: "apiserver-template.yaml"
__tsb_config_file: "apiserver-config.yaml"
__tsb_rbac_file: "rbac-template.yaml"