-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  CONTRIBUTING.md | 21
-rw-r--r--  README.md | 12
-rw-r--r--  images/installer/Dockerfile | 2
-rw-r--r--  images/installer/Dockerfile.rhel7 | 2
-rw-r--r--  inventory/hosts.example | 5
-rw-r--r--  inventory/hosts.localhost | 26
-rw-r--r--  openshift-ansible.spec | 32
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/init/basic_facts.yml | 8
-rw-r--r--  playbooks/openshift-hosted/private/openshift_default_storage_class.yml | 4
-rw-r--r--  playbooks/openshift-logging/private/config.yml | 1
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml | 1
-rw-r--r--  playbooks/openshift-master/private/config.yml | 3
-rw-r--r--  playbooks/openshift-master/private/restart.yml | 9
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-master/private/tasks/restart_services.yml | 4
-rw-r--r--  playbooks/openshift-metrics/private/config.yml | 1
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml | 2
-rwxr-xr-x  playbooks/openstack/inventory.py | 48
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_csr.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_storageclass.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 2
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 2
-rw-r--r--  roles/lib_utils/library/docker_creds.py | 4
-rw-r--r--  roles/lib_utils/library/openshift_container_binary_sync.py | 2
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 8
-rw-r--r--  roles/nuage_master/tasks/etcd_certificates.yml | 21
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 17
-rwxr-xr-x  roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 | 39
-rwxr-xr-x  roles/nuage_master/templates/nuage-master-config-daemonset.j2 | 9
-rwxr-xr-x  roles/nuage_master/templates/nuage-node-config-daemonset.j2 | 11
-rw-r--r--  roles/nuage_master/vars/main.yaml | 7
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 4
-rw-r--r--  roles/openshift_default_storage_class/defaults/main.yml | 9
-rw-r--r--  roles/openshift_logging/README.md | 1
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 27
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 13
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/passwd.j2 | 2
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 3
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 4
-rw-r--r--  roles/openshift_master/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master/tasks/restart.yml | 17
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 4
-rw-r--r--  roles/openshift_node/defaults/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml | 28
-rw-r--r--  roles/openshift_node/templates/multipath.conf.j2 | 15
-rw-r--r--  roles/openshift_openstack/templates/docker-storage-setup-dm.j2 | 4
-rw-r--r--  roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 | 4
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 4
-rw-r--r--  roles/openshift_openstack/templates/heat_stack_server.yaml.j2 | 3
-rw-r--r--  roles/openshift_provisioners/defaults/main.yaml | 10
-rw-r--r--  roles/openshift_provisioners/tasks/main.yaml | 5
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 14
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 106
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 10
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 26
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/glusterfs.conf | 5
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 | 19
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 | 19
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 | 19
98 files changed, 601 insertions, 177 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index da0712a8a..bdfa06c4a 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.33.0 ./
+3.9.0-0.35.0 ./
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1c0fa73ad..ef0a302dc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -74,6 +74,27 @@ If you are new to Git, these links might help:
---
+## Simple all-in-one localhost installation
+```
+git clone https://github.com/openshift/openshift-ansible
+cd openshift-ansible
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
+```
+
+## Development process
+Most changes can be applied by re-running the config playbook. However, while
+the config playbook will run faster the second time through, it is still going
+to take a very long time. As such, you may wish to run a smaller subset of the
+installation playbooks. You can, for instance, run the node, master, or hosted
+playbooks via playbooks/openshift-node/config.yml,
+playbooks/openshift-master/config.yml, or playbooks/openshift-hosted/config.yml,
+respectively.
+
+We're actively working to refactor the playbooks into smaller, discrete
+components and will document that structure shortly; for now, those are the
+most sensible logical units of work.
+
## Running tests and other verification tasks
We use [`tox`](http://readthedocs.org/docs/tox/) to manage virtualenvs where
diff --git a/README.md b/README.md
index fdedf2f19..609930dcd 100644
--- a/README.md
+++ b/README.md
@@ -74,7 +74,17 @@ Fedora:
dnf install -y ansible pyOpenSSL python-cryptography python-lxml
```
-## OpenShift Installation Documentation:
+## Simple all-in-one localhost Installation
+This assumes that you've installed the base dependencies and that you're
+running on Fedora or RHEL.
+```
+git clone https://github.com/openshift/openshift-ansible
+cd openshift-ansible
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
+```
+
+## Complete Production Installation Documentation:
- [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
- [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index 22a0d06a0..c9ec8ba41 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -10,7 +10,7 @@ COPY images/installer/origin-extra-root /
# install ansible and deps
RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
&& yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
+ && EPEL_PKGS="ansible python2-boto python2-boto3 python2-crypto google-cloud-sdk-183.0.0 which" \
&& yum install -y epel-release \
&& yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
&& EPEL_TESTING_PKGS="python2-libcloud" \
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 3b05c1aa6..5da950744 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
USER root
# Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 python2-crypto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
&& yum repolist > /dev/null \
&& yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
&& yum-config-manager --enable rhel-7-server-rh-common-rpms \
diff --git a/inventory/hosts.example b/inventory/hosts.example
index 322d23888..82c588100 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -325,7 +325,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# or to one or all of the masters defined in the inventory if no load
# balancer is present.
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# If an external load balancer is used, the public hostname should resolve to
+# the external load balancer's address.
+#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com
# Configure controller arguments
#osm_controller_args={'resource-quota-sync-period': ['10s']}
diff --git a/inventory/hosts.localhost b/inventory/hosts.localhost
new file mode 100644
index 000000000..41ed309e1
--- /dev/null
+++ b/inventory/hosts.localhost
@@ -0,0 +1,26 @@
+# Bare minimum hosts file
+
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+# If your target hosts run Fedora, uncomment this
+#ansible_python_interpreter=/usr/bin/python3
+openshift_deployment_type=origin
+openshift_release=3.7
+osm_cluster_network_cidr=10.128.0.0/14
+openshift_portal_net=172.30.0.0/16
+osm_host_subnet_length=9
+# localhost likely doesn't meet the minimum requirements
+openshift_disable_check=disk_availability,memory_availability
+
+[masters]
+localhost ansible_connection=local
+
+[etcd]
+localhost ansible_connection=local
+
+[nodes]
+localhost ansible_connection=local openshift_schedulable=true openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 059bbffae..48f666a07 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.33.0%{?dist}
+Release: 0.35.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -28,6 +28,7 @@ Requires: java-1.8.0-openjdk-headless
Requires: httpd-tools
Requires: libselinux-python
Requires: python-passlib
+Requires: python2-crypto
%description
Openshift and Atomic Enterprise Ansible
@@ -200,6 +201,35 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Jan 31 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.35.0
+- add glusterblock support for ansible (m.judeikis@gmail.com)
+- Add a bare minimum localhost hosts file (sdodson@redhat.com)
+- copy etcd client certificates for nuage openshift monitor
+ (siva_teja.areti@nokia.com)
+- fix hostvars parameter name (tzumainn@redhat.com)
+- remove mountpoint parameter (tzumainn@redhat.com)
+- flake cleanup (tzumainn@redhat.com)
+- code simplification and lint cleanup (tzumainn@redhat.com)
+- Symlink kubectl to oc instead of openshift (mfojtik@redhat.com)
+- Rework provisioners vars to support different prefix/version for Origin/OSE
+ (vrutkovs@redhat.com)
+- add cinder mountpoint to inventory (tzumainn@redhat.com)
+- allow setting of kibana env vars (jcantril@redhat.com)
+- No longer compare with legacy hosted var (ewolinet@redhat.com)
+- Preserving ES dc storage type unless overridden by inventory variable
+ (ewolinet@redhat.com)
+- Fix: e2e tests failing due to :1936/metrics unaccessible.
+ (jmencak@redhat.com)
+
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.34.0
+- docker_creds: decode docker_config for py3 only if its a string
+ (vrutkovs@redhat.com)
+- Removing ability to change default cassandra_pvc_prefix based on metrics
+ volume name (ewolinet@redhat.com)
+- Don't deploy the console if disabled or registry subtype (sdodson@redhat.com)
+- [1538960] Correct ability to override openshift_management_app_template
+ (rteague@redhat.com)
+
* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.33.0
-
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
deleted file mode 100644
index faeb332ad..000000000
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: masters[0]
- roles:
- - role: openshift_logging
- openshift_hosted_logging_cleanup: no
-
-- name: Update master-config for publicLoggingURL
- hosts: masters:!masters[0]
- pre_tasks:
- - set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain }}"
- tasks:
- - import_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index 2b27f8dd0..edc541ef9 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -60,7 +60,7 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when:
- l_upgrade_nodes_only | default(False) | bool
- - openshift.common.version != openshift_version
+ - not openshift.common.version | match(openshift_version)
# If we're only upgrading nodes, skip this.
- import_playbook: ../../../../openshift-master/private/validate_restart.yml
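[Editor's note] The hunk above switches from strict equality to the Jinja2 `match` test, so a master reporting a patch-level version (e.g. 3.9.0) no longer trips the failure when `openshift_version` is only 3.9. A minimal Python sketch of that anchored prefix-match semantics, with illustrative version strings:
```
import re

master_version = "3.9.0"    # illustrative value of openshift.common.version
requested_version = "3.9"   # illustrative value of openshift_version

# Old condition: strict inequality still fires even though 3.9.0 satisfies 3.9
print(master_version != requested_version)              # True  -> playbook fails

# New condition: regex anchored at the start, roughly what `| match()` evaluates
print(not re.match(requested_version, master_version))  # False -> no failure
```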
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index a10fd4bee..c27118f6f 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -72,8 +72,6 @@
# support for optional hooks to be defined.
- name: Upgrade master
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
roles:
- openshift_facts
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index fe1fdefff..c8a42322d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -64,6 +64,7 @@
- import_playbook: ../upgrade_control_plane.yml
vars:
openshift_release: '3.8'
+ openshift_pkg_version: ''
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
## 3.8 upgrade complete we should now be able to upgrade to 3.9
diff --git a/playbooks/init/basic_facts.yml b/playbooks/init/basic_facts.yml
index 06a4e7291..a9bf06693 100644
--- a/playbooks/init/basic_facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -67,3 +67,11 @@
first_master_client_binary: "{{ openshift_client_binary }}"
#Some roles may require this to be set for first master
openshift_client_binary: "{{ openshift_client_binary }}"
+
+- name: Disable web console if required
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ openshift_web_console_install: False
+ when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
diff --git a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
index 62fe0dd60..c59ebcead 100644
--- a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
+++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
@@ -3,4 +3,6 @@
hosts: oo_first_master
roles:
- role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind in ['aws','gce','openstack','vsphere']
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index d6b26647c..07aa8bfde 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -24,6 +24,7 @@
- import_role:
name: openshift_logging
tasks_from: update_master_config
+ when: not openshift.common.version_gte_3_9
- name: Logging Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 85be0e600..ca514ed26 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -16,7 +16,6 @@
vars:
cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
roles:
- role: openshift_project_request_template
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 153ea9993..d2fc2eed8 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -78,7 +78,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Inspect state of first master config settings
@@ -166,7 +165,6 @@
hosts: oo_masters_to_config
any_errors_fatal: true
vars:
- openshift_master_ha: "{{ openshift.master.ha }}"
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
@@ -186,6 +184,7 @@
- role: openshift_buildoverrides
- role: nickhammond.logrotate
- role: openshift_master
+ openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
index 5cb284935..17d90533c 100644
--- a/playbooks/openshift-master/private/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -3,16 +3,13 @@
- name: Restart masters
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - import_tasks: ../../../roles/openshift_master/handlers/main.yml
roles:
- openshift_facts
post_tasks:
- include_tasks: tasks/restart_hosts.yml
when: openshift_rolling_restart_mode | default('services') == 'system'
-
- - include_tasks: tasks/restart_services.yml
+ - import_role:
+ name: openshift_master
+ tasks_from: restart.yml
when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 007b23ea3..20ebf70d3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -8,7 +8,6 @@
- openshift_facts:
role: master
local_facts:
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Update master count
modify_yaml:
diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml
deleted file mode 100644
index cf2c282e3..000000000
--- a/playbooks/openshift-master/private/tasks/restart_services.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- import_role:
- name: openshift_master
- tasks_from: restart.yml
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 1e237e3f0..889ea77b1 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -25,6 +25,7 @@
import_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
+ when: not openshift.common.version_gte_3_9
- name: Metrics Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 7249ced70..7371bd7ac 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -16,6 +16,7 @@
until: not (l_docker_restart_docker_in_node_result is failed)
retries: 3
delay: 30
+ when: openshift_node_restart_docker_required | default(True)
- name: Restart containerized services
service:
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index 8b7272485..cdf816fbf 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -4,3 +4,5 @@
- import_playbook: private/redeploy-certificates.yml
- import_playbook: private/restart.yml
+ vars:
+ openshift_node_restart_docker_required: False
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
index 76e658eb7..d5a8c3e24 100755
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -15,18 +15,10 @@ import json
import shade
-def build_inventory():
- '''Build the dynamic inventory.'''
- cloud = shade.openstack_cloud()
-
+def base_openshift_inventory(cluster_hosts):
+ '''Set the base openshift inventory.'''
inventory = {}
- # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
- # environment variable.
- cluster_hosts = [
- server for server in cloud.list_servers()
- if 'metadata' in server and 'clusterid' in server.metadata]
-
masters = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'master']
@@ -67,6 +59,34 @@ def build_inventory():
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
+ return inventory
+
+
+def get_docker_storage_mountpoints(volumes):
+ '''Check volumes to see if they're being used for docker storage'''
+ docker_storage_mountpoints = {}
+ for volume in volumes:
+ if volume.metadata.get('purpose') == "openshift_docker_storage":
+ for attachment in volume.attachments:
+ if attachment.server_id in docker_storage_mountpoints:
+ docker_storage_mountpoints[attachment.server_id].append(attachment.device)
+ else:
+ docker_storage_mountpoints[attachment.server_id] = [attachment.device]
+ return docker_storage_mountpoints
+
+
+def build_inventory():
+ '''Build the dynamic inventory.'''
+ cloud = shade.openstack_cloud()
+
+ # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+ # environment variable.
+ cluster_hosts = [
+ server for server in cloud.list_servers()
+ if 'metadata' in server and 'clusterid' in server.metadata]
+
+ inventory = base_openshift_inventory(cluster_hosts)
+
for server in cluster_hosts:
if 'group' in server.metadata:
group = server.metadata.group
@@ -76,6 +96,9 @@ def build_inventory():
inventory['_meta'] = {'hostvars': {}}
+ # cinder volumes used for docker storage
+ docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes())
+
for server in cluster_hosts:
ssh_ip_address = server.public_v4 or server.private_v4
hostvars = {
@@ -111,6 +134,11 @@ def build_inventory():
if node_labels:
hostvars['openshift_node_labels'] = node_labels
+ # check for attached docker storage volumes
+ if 'os-extended-volumes:volumes_attached' in server:
+ if server.id in docker_storage_mountpoints:
+ hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id])
+
inventory['_meta']['hostvars'][server.name] = hostvars
return inventory
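[Editor's note] The new `get_docker_storage_mountpoints()` helper builds a map from server ID to the device paths of Cinder volumes tagged for docker storage, which `build_inventory()` then joins into the `docker_storage_mountpoints` hostvar. A rough, self-contained sketch of that behaviour; the stand-in volume and attachment objects below are illustrative, not shade API types:
```
from types import SimpleNamespace

def get_docker_storage_mountpoints(volumes):
    '''Map server IDs to the devices of attached docker-storage volumes.'''
    mountpoints = {}
    for volume in volumes:
        if volume.metadata.get('purpose') == "openshift_docker_storage":
            for attachment in volume.attachments:
                mountpoints.setdefault(attachment.server_id, []).append(attachment.device)
    return mountpoints

# Illustrative stand-in for one entry of cloud.list_volumes()
volume = SimpleNamespace(
    metadata={'purpose': 'openshift_docker_storage'},
    attachments=[SimpleNamespace(server_id='node-1', device='/dev/vdb')])

mounts = get_docker_storage_mountpoints([volume])
print(' '.join(mounts['node-1']))  # "/dev/vdb" -> docker_storage_mountpoints hostvar
```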
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 05b2763d5..bfed58011 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1138,7 +1138,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index 324f52689..c78e379d5 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -1116,7 +1116,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 152f270ab..b1b2cb5b5 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1124,7 +1124,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 3082f5890..2773201d7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1110,7 +1110,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 92515889b..25cbed8b7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1124,7 +1124,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index fe565987c..e26214316 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1228,7 +1228,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 44de29592..62fca19e5 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1253,7 +1253,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 9761b4b4e..0c4bfa01f 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1102,7 +1102,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 047edffbb..36e6111eb 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1108,7 +1108,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 0cea07256..ab4f153c7 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1152,7 +1152,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 1f52fba40..f334ddaa4 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1119,7 +1119,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 72023eaf7..7e9078339 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1092,7 +1092,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 94b08d9ce..e71e2eb5c 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1111,7 +1111,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index ad837fdb5..ac3279ef8 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1128,7 +1128,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 892546e56..ca53c4c97 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1131,7 +1131,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 38df585f0..877c78d93 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1063,7 +1063,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 70632f86d..507170424 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1120,7 +1120,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 4eee748d7..347e879ca 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1117,7 +1117,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 2e73a7645..93c96b817 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1124,7 +1124,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index e003770d8..3369cf134 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1168,7 +1168,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index c142f1f43..1b6202a26 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1106,7 +1106,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 62bda33ad..732299e48 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1164,7 +1164,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index c541e1bbd..a6cf764ff 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1171,7 +1171,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 646a39224..90d514292 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1104,7 +1104,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 99a8e8f3d..0d9acac0e 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1104,7 +1104,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index 7e7d0fa60..6fb5a94e9 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -1122,7 +1122,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 7bbe38819..feb69348b 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1164,7 +1164,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 63adbd6ac..0f024c048 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1076,7 +1076,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 3c07f8d4b..6f409f979 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1153,7 +1153,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 1fb32164e..9a4ce3509 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -314,7 +314,7 @@ class Utils(object):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
index b94c0b779..936fb1c38 100644
--- a/roles/lib_utils/library/docker_creds.py
+++ b/roles/lib_utils/library/docker_creds.py
@@ -148,10 +148,12 @@ def update_config(docker_config, registry, username, password):
def write_config(module, docker_config, dest):
'''Write updated credentials into dest/config.json'''
+ if not isinstance(docker_config, dict):
+ docker_config = docker_config.decode()
conf_file_path = os.path.join(dest, 'config.json')
try:
with open(conf_file_path, 'w') as conf_file:
- json.dump(docker_config.decode(), conf_file, indent=8)
+ json.dump(docker_config, conf_file, indent=8)
except IOError as ioerror:
result = {'failed': True,
'changed': False,
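[Editor's note] The added isinstance guard appears to cover the case where, on Python 3, `docker_config` arrives as bytes while other callers already pass a dict; unconditionally calling `.decode()` on a dict raises AttributeError. A small sketch of the two paths, with illustrative payloads:
```
import json

def normalize_docker_config(docker_config):
    # Decode only when we were handed bytes; dicts pass through untouched
    if not isinstance(docker_config, dict):
        docker_config = docker_config.decode()
    return docker_config

print(json.dumps(normalize_docker_config({'auths': {}}), indent=8))  # dict path
print(normalize_docker_config(b'{"auths": {}}'))                     # bytes path -> str
```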
diff --git a/roles/lib_utils/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py
index 440b8ec28..efdfcf1c7 100644
--- a/roles/lib_utils/library/openshift_container_binary_sync.py
+++ b/roles/lib_utils/library/openshift_container_binary_sync.py
@@ -107,7 +107,7 @@ class BinarySyncer(object):
self._sync_binary('oc')
# Ensure correct symlinks created:
- self._sync_symlink('kubectl', 'openshift')
+ self._sync_symlink('kubectl', 'oc')
# Remove old oadm binary
if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 7b55dda56..c0411d641 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,9 +1,7 @@
---
- name: restart master api
systemd: name={{ openshift_service_type }}-master-api state=restarted
- when: >
- (openshift_master_ha | bool) and
- (not master_api_service_status_changed | default(false))
+ when: (not master_api_service_status_changed | default(false))
# TODO: need to fix up ignore_errors here
# We retry the controllers because the API may not be 100% initialized yet.
@@ -13,7 +11,5 @@
delay: 5
register: result
until: result.rc == 0
- when: >
- (openshift_master_ha | bool) and
- (not master_controllers_service_status_changed | default(false))
+ when: (not master_controllers_service_status_changed | default(false))
ignore_errors: yes
diff --git a/roles/nuage_master/tasks/etcd_certificates.yml b/roles/nuage_master/tasks/etcd_certificates.yml
new file mode 100644
index 000000000..99ec27f91
--- /dev/null
+++ b/roles/nuage_master/tasks/etcd_certificates.yml
@@ -0,0 +1,21 @@
+---
+- name: Generate openshift etcd certs
+ become: yes
+ include_role:
+ name: etcd
+ tasks_from: client_certificates
+ vars:
+ etcd_cert_prefix: nuageEtcd-
+ etcd_cert_config_dir: "{{ cert_output_dir }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-nuage-{{ openshift.common.hostname }}"
+
+
+- name: Error if etcd certs are not copied
+ stat:
+ path: "{{ item }}"
+ with_items:
+ - "{{ cert_output_dir }}/nuageEtcd-ca.crt"
+ - "{{ cert_output_dir }}/nuageEtcd-client.crt"
+ - "{{ cert_output_dir }}/nuageEtcd-client.key"
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index 29e16b6f8..a1781dc56 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -81,6 +81,7 @@
- nuage.key
- nuage.kubeconfig
+- include_tasks: etcd_certificates.yml
- include_tasks: certificates.yml
- name: Install Nuage VSD user certificate
@@ -99,7 +100,16 @@
become: yes
template: src=nuage-node-config-daemonset.j2 dest=/etc/nuage-node-config-daemonset.yaml owner=root mode=0644
-- name: Add the service account to the privileged scc to have root permissions
+- name: Create Nuage Infra Pod daemon set yaml file
+ become: yes
+ template: src=nuage-infra-pod-config-daemonset.j2 dest=/etc/nuage-infra-pod-config-daemonset.yaml owner=root mode=0644
+
+- name: Add the service account to the privileged scc to have root permissions for kube-system
+ shell: oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:daemon-set-controller
+ ignore_errors: true
+ when: inventory_hostname == groups.oo_first_master.0
+
+- name: Add the service account to the privileged scc to have root permissions for openshift-infra
shell: oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-infra:daemonset-controller
ignore_errors: true
when: inventory_hostname == groups.oo_first_master.0
@@ -114,6 +124,11 @@
ignore_errors: true
when: inventory_hostname == groups.oo_first_master.0
+- name: Spawn Nuage Infra daemon sets pod
+ shell: oc create -f /etc/nuage-infra-pod-config-daemonset.yaml
+ ignore_errors: true
+ when: inventory_hostname == groups.oo_first_master.0
+
- name: Restart daemons
command: /bin/true
notify:
diff --git a/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2
new file mode 100755
index 000000000..534a1517f
--- /dev/null
+++ b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2
@@ -0,0 +1,39 @@
+# This manifest installs the Nuage Infra pod on
+# each worker node in an OpenShift cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: nuage-infra-ds
+ namespace: kube-system
+ labels:
+ k8s-app: nuage-infra-ds
+spec:
+ selector:
+ matchLabels:
+ k8s-app: nuage-infra-ds
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: nuage-infra-ds
+ spec:
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ operator: Exists
+ containers:
+ # This container spawns a Nuage Infra pod
+ # on each worker node
+ - name: install-nuage-infra
+ image: nuage/infra:{{ nuage_infra_container_image_version }}
+ command: ["/install-nuage-infra-pod.sh"]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /var/log
+ name: log-dir
+ volumes:
+ - name: log-dir
+ hostPath:
+ path: /var/log
diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
index 7be5d6743..3543eeb56 100755
--- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
@@ -37,11 +37,14 @@ data:
nuageMonServer:
URL: 0.0.0.0:9443
certificateDirectory: {{ nuage_master_crt_dir }}
+ clientCA: ""
+ serverCertificate: ""
+ serverKey: ""
# etcd config required for HA
etcdClientConfig:
- ca: {{ nuage_master_crt_dir }}/nuageMonCA.crt
- certFile: {{ nuage_master_crt_dir }}/nuageMonServer.crt
- keyFile: {{ nuage_master_crt_dir }}/master.etcd-client.key
+ ca: {{ nuage_master_crt_dir }}/nuageEtcd-ca.crt
+ certFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.crt
+ keyFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.key
urls:
{% for etcd_url in openshift.master.etcd_urls %}
- {{ etcd_url }}
diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
index 6a1267d94..996a2d2b0 100755
--- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
@@ -61,6 +61,8 @@ spec:
selector:
matchLabels:
k8s-app: nuage-cni-ds
+ updateStrategy:
+ type: RollingUpdate
template:
metadata:
labels:
@@ -104,6 +106,8 @@ spec:
- mountPath: /var/log
name: cni-log-dir
- mountPath: {{ nuage_node_config_dsets_mount_dir }}
+ name: var-usr-share-dir
+ - mountPath: /usr/share/
name: usr-share-dir
volumes:
- name: cni-bin-dir
@@ -121,9 +125,12 @@ spec:
- name: cni-log-dir
hostPath:
path: /var/log
- - name: usr-share-dir
+ - name: var-usr-share-dir
hostPath:
path: {{ nuage_node_config_dsets_mount_dir }}
+ - name: usr-share-dir
+ hostPath:
+ path: /usr/share/
---
@@ -164,7 +171,7 @@ spec:
- name: NUAGE_PLATFORM
value: '"kvm, k8s"'
- name: NUAGE_K8S_SERVICE_IPV4_SUBNET
- value: '192.168.0.0\/16'
+ value: '172.30.0.0\/16'
- name: NUAGE_NETWORK_UPLINK_INTF
value: "eth0"
volumeMounts:
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index 114514d7c..5045e1cc5 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -26,9 +26,10 @@ nuage_master_config_dsets_mount_dir: /usr/share/
nuage_node_config_dsets_mount_dir: /usr/share/
nuage_cni_bin_dsets_mount_dir: /opt/cni/bin
nuage_cni_netconf_dsets_mount_dir: /etc/cni/net.d
-nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.1.1') }}"
-nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.1.1') }}"
-nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.1.1') }}"
+nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.2.1') }}"
+nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.2.1') }}"
+nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.2.1') }}"
+nuage_infra_container_image_version: "{{ nuage_infra_image_version | default('v5.2.1') }}"
api_server_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
nuage_vport_mtu: "{{ nuage_interface_mtu | default('1460') }}"
master_host_type: "{{ master_base_host_type | default('is_rhel_server') }}"
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 178e0849c..c8d385db5 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -44,6 +44,8 @@ openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
+openshift_aws_elb_cert_arn: ''
+
openshift_aws_elb_dict:
master:
external:
@@ -65,7 +67,7 @@ openshift_aws_elb_dict:
load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
instance_protocol: ssl
instance_port: "{{ openshift_master_api_port | default(8443) }}"
- ssl_certificate_id: ''
+ ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
name: "{{ openshift_aws_elb_basename }}-master-external"
tags: "{{ openshift_aws_kube_tags }}"
internal:
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
index 014c06641..687d60171 100644
--- a/roles/openshift_default_storage_class/defaults/main.yml
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -1,4 +1,7 @@
---
+# Must not be blank if you're using vsphere
+openshift_cloudprovider_vsphere_datacenter: ''
+
openshift_storageclass_defaults:
aws:
provisioner: aws-ebs
@@ -19,6 +22,12 @@ openshift_storageclass_defaults:
parameters:
fstype: xfs
+ vsphere:
+ provisioner: vsphere-volume
+ name: standard
+ parameters:
+ datastore: "{{ openshift_cloudprovider_vsphere_datacenter }}"
+
openshift_storageclass_default: "true"
openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index a192bd67e..c438236a4 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -58,6 +58,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land.
- `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect.
+- `openshift_logging_kibana_env_vars`: A map of environment variables to add to the kibana deployment config (e.g. {"ELASTICSEARCH_REQUESTTIMEOUT":"30000"})
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
- `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'.
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index ced7397b5..6be47b1f8 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -140,4 +140,6 @@
console_config_edits:
- key: clusterInfo#loggingPublicURL
value: ""
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift.common.version_gte_3_9
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index e4883bfa0..c905502ac 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -71,10 +71,17 @@
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
when: openshift_logging_es_pvc_prefix == ""
+# Use this module to set the fact: otherwise we were getting a value of "" when
+# trying to use default() in the set_fact below, which caused us to evaluate
+# openshift_logging_elasticsearch_storage_type incorrectly.
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ elasticsearch_storage_type: openshift_logging_elasticsearch_storage_type
+
- set_fact:
- elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}"
+ default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir' }}"
-# We don't allow scaling down of ES nodes currently
- include_role:
name: openshift_logging_elasticsearch
vars:
@@ -85,7 +92,8 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}"
+ openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
@@ -112,7 +120,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
@@ -133,7 +141,7 @@
when: openshift_logging_es_ops_pvc_prefix == ""
- set_fact:
- elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir') }}"
+ default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir' }}"
when:
- openshift_logging_use_ops | bool
@@ -147,7 +155,8 @@
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}"
+ openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
@@ -189,7 +198,7 @@
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+ openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
@@ -326,4 +335,6 @@
console_config_edits:
- key: clusterInfo#loggingPublicURL
value: "https://{{ openshift_logging_kibana_hostname }}"
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift.common.version_gte_3_9
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index ff5ad1045..b731d93a0 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -137,6 +137,16 @@
- "prometheus_out.stderr | length > 0"
- "'already exists' not in prometheus_out.stderr"
+- set_fact:
+ _logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
+
+- template:
+ src: passwd.j2
+ dest: "{{mktemp.stdout}}/passwd.yml"
+ vars:
+ logging_user_name: "{{ openshift_logging_elasticsearch_prometheus_sa }}"
+ logging_user_passwd: "{{ _logging_metrics_proxy_passwd }}"
+
# View role and binding
- name: Generate logging-elasticsearch-view-role
template:
@@ -255,6 +265,8 @@
path: "{{ generated_certs_dir }}/ca.crt"
- name: admin.jks
path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: passwd.yml
+ path: "{{mktemp.stdout}}/passwd.yml"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -391,6 +403,7 @@
es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}"
deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
es_replicas: 1
+ basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}"
- name: Set ES dc
oc_obj:
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 4b189f255..b1d6a4489 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -51,6 +51,7 @@ spec:
- -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
- -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }}
+ - -basic-auth-password={{ basic_auth_passwd }}
- -upstream=https://localhost:9200
- '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
- '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
diff --git a/roles/openshift_logging_elasticsearch/templates/passwd.j2 b/roles/openshift_logging_elasticsearch/templates/passwd.j2
new file mode 100644
index 000000000..a22151eef
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/passwd.j2
@@ -0,0 +1,2 @@
+"{{logging_user_name}}":
+ passwd: "{{logging_user_passwd}}"
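As a sketch, with a hypothetical service account name and a base64-encoded random password (both values illustrative only), the rendered passwd.yml would look roughly like:

```yaml
# Hypothetical rendering of passwd.j2; the name and password below are illustrative
"prometheus-metrics-sa":
  passwd: "dGhpc2lzbm90YXJlYWxwYXNzd29yZA=="
```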
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index 899193838..b69cbacae 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -18,6 +18,9 @@ openshift_logging_kibana_es_port: 9200
openshift_logging_kibana_replicas: 1
openshift_logging_kibana_edge_term_policy: Redirect
+# map of environment variables to add to the kibana deploymentconfig
+openshift_logging_kibana_env_vars: {}
+
# this is used to determine if this is an operations deployment or a non-ops deployment
# simply used for naming purposes
openshift_logging_kibana_ops_deployment: false
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 3c3bd902e..c67235c62 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -251,6 +251,7 @@
kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
+ kibana_env_vars: "{{ openshift_logging_kibana_env_vars | default({}) }}"
- name: Set Kibana DC
oc_obj:
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 57d216373..ed05b8458 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -70,6 +70,10 @@ spec:
resourceFieldRef:
containerName: kibana
resource: limits.memory
+{% for key, value in kibana_env_vars.items() %}
+ - name: "{{ key }}"
+ value: "{{ value }}"
+{% endfor %}
volumeMounts:
- name: kibana
mountPath: /etc/kibana/keys
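To illustrate the loop added above, assuming kibana_env_vars contains a single hypothetical entry, the rendered container environment would gain entries such as:

```yaml
# Hypothetical rendered output of the kibana_env_vars loop in kibana.j2
- name: "ELASTICSEARCH_REQUESTTIMEOUT"
  value: "30000"
```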
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index b12a6b346..41f2ee2a5 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -227,7 +227,7 @@
- pause:
seconds: 15
when:
- - openshift.master.ha | bool
+ - openshift_master_ha | bool
- name: Start and enable master api all masters
systemd:
diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml
index 715347101..f7697067a 100644
--- a/roles/openshift_master/tasks/restart.yml
+++ b/roles/openshift_master/tasks/restart.yml
@@ -3,7 +3,6 @@
service:
name: "{{ openshift_service_type }}-master-api"
state: restarted
- when: openshift_master_ha | bool
- name: Wait for master API to come back online
wait_for:
host: "{{ openshift.common.hostname }}"
@@ -11,12 +10,10 @@
delay: 10
port: "{{ openshift.master.api_port }}"
timeout: 600
- when: openshift_master_ha | bool
-- name: Restart master controllers
- service:
- name: "{{ openshift_service_type }}-master-controllers"
- state: restarted
- # Ignore errrors since it is possible that type != simple for
- # pre-3.1.1 installations.
- ignore_errors: true
- when: openshift_master_ha | bool
+# We retry the controllers because the API may not be 100% initialized yet.
+- name: restart master controllers
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 8da74430f..293d8f451 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -54,7 +54,7 @@ openshift_metrics_master_url: https://kubernetes.default.svc
openshift_metrics_node_id: nodename
openshift_metrics_project: openshift-infra
-openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}"
+openshift_metrics_cassandra_pvc_prefix: metrics-cassandra
openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
openshift_metrics_hawkular_user_write_access: False
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 6b6c21d71..f05c8968d 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -79,7 +79,9 @@
console_config_edits:
- key: clusterInfo#metricsPublicURL
value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift.common.version_gte_3_9
- command: >
{{openshift_client_binary}}
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 1664e9975..ed849916d 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -28,4 +28,6 @@
console_config_edits:
- key: clusterInfo#metricsPublicURL
value: ""
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift.common.version_gte_3_9
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0fe4c2035..9f887891b 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -137,6 +137,7 @@ default_r_openshift_node_image_prep_packages:
- yum-utils
# gluster
- glusterfs-fuse
+- device-mapper-multipath
# nfs
- nfs-utils
- flannel
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index a8048c42f..72415f9a6 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -1,6 +1,32 @@
---
- name: Install iSCSI storage plugin dependencies
- package: name=iscsi-initiator-utils state=present
+ package:
+ name: "{{ item }}"
+ state: present
when: not openshift_is_atomic | bool
register: result
until: result is succeeded
+ with_items:
+ - iscsi-initiator-utils
+ - device-mapper-multipath
+
+- name: Start and enable multipathd and rpcbind
+ systemd:
+ name: "{{ item }}"
+ state: started
+ enabled: True
+ with_items:
+ - multipathd
+ - rpcbind
+
+- name: Template multipath configuration
+ template:
+ dest: "/etc/multipath.conf"
+ src: multipath.conf.j2
+ backup: true
+ when: not openshift_is_atomic | bool
+
+#enable multipath
+- name: Enable multipath
+ command: "mpathconf --enable"
+ when: not openshift_is_atomic | bool
diff --git a/roles/openshift_node/templates/multipath.conf.j2 b/roles/openshift_node/templates/multipath.conf.j2
new file mode 100644
index 000000000..8a0abc2c1
--- /dev/null
+++ b/roles/openshift_node/templates/multipath.conf.j2
@@ -0,0 +1,15 @@
+# LIO iSCSI
+# TODO: Add env variables for tweaking
+devices {
+ device {
+ vendor "LIO-ORG"
+ user_friendly_names "yes"
+ path_grouping_policy "failover"
+ path_selector "round-robin 0"
+ failback immediate
+ path_checker "tur"
+ prio "const"
+ no_path_retry 120
+ rr_weight "uniform"
+ }
+}
diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
index 32c6b5838..9015c561f 100644
--- a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
+++ b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
@@ -1,4 +1,8 @@
+{% if docker_storage_mountpoints is defined %}
+DEVS="{{ docker_storage_mountpoints }}"
+{% else %}
DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}"
+{% endif %}
VG="{{ openshift_openstack_container_storage_setup.docker_vg }}"
DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}"
EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ openshift_openstack_container_storage_setup.docker_dm_basesize }}"
diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
index 1bf366bdc..917347073 100644
--- a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
+++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
@@ -1,4 +1,8 @@
+{% if docker_storage_mountpoints is defined %}
+DEVS="{{ docker_storage_mountpoints }}"
+{% else %}
DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}"
+{% endif %}
VG="{{ openshift_openstack_container_storage_setup.docker_vg }}"
DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}"
STORAGE_DRIVER=overlay2
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 8e7c6288a..1d3173022 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -418,6 +418,10 @@ resources:
protocol: tcp
port_range_min: 443
port_range_max: 443
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 1936
+ port_range_max: 1936
cns-secgrp:
type: OS::Neutron::SecurityGroup
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index 29b09f3c9..9aeecfa74 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -261,11 +261,12 @@ resources:
properties:
size: { get_param: volume_size }
availability_zone: { get_param: availability_zone }
+ metadata:
+ purpose: openshift_docker_storage
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
volume_id: { get_resource: cinder_volume }
instance_uuid: { get_resource: server }
- mountpoint: /dev/sdb
{% endif %}
diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml
index a6f040831..34ba78404 100644
--- a/roles/openshift_provisioners/defaults/main.yaml
+++ b/roles/openshift_provisioners/defaults/main.yaml
@@ -1,7 +1,5 @@
---
openshift_provisioners_install_provisioners: True
-openshift_provisioners_image_prefix: docker.io/openshift/origin-
-openshift_provisioners_image_version: latest
openshift_provisioners_efs: False
openshift_provisioners_efs_path: /persistentvolumes
@@ -10,3 +8,11 @@ openshift_provisioners_efs_nodeselector: ""
openshift_provisioners_efs_supplementalgroup: '65534'
openshift_provisioners_project: openshift-infra
+
+openshift_provisioners_image_prefix_dict:
+ origin: "docker.io/openshift/origin-"
+ openshift-enterprise: "registry.access.redhat.com/openshift3/ose-"
+
+openshift_provisioners_image_version_dict:
+ origin: "latest"
+ openshift-enterprise: "{{ openshift_image_tag }}"
diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml
index 4ba26b2b8..d00573b07 100644
--- a/roles/openshift_provisioners/tasks/main.yaml
+++ b/roles/openshift_provisioners/tasks/main.yaml
@@ -12,6 +12,11 @@
check_mode: no
tags: provisioners_init
+- name: Set provisioners image facts
+ set_fact:
+ openshift_provisioners_image_prefix: "{{ openshift_provisioners_image_prefix | default(openshift_provisioners_image_prefix_dict[openshift_deployment_type]) }}"
+ openshift_provisioners_image_version: "{{ openshift_provisioners_image_version | default(openshift_provisioners_image_version_dict[openshift_deployment_type]) }}"
+
- include_tasks: install_provisioners.yaml
when: openshift_provisioners_install_provisioners | default(false) | bool
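As a sketch of how the dictionary lookups above resolve, with openshift_deployment_type set to openshift-enterprise and a hypothetical openshift_image_tag, the resulting facts would be approximately:

```yaml
# Hypothetical resolved values; the tag value is illustrative only
openshift_provisioners_image_prefix: "registry.access.redhat.com/openshift3/ose-"
openshift_provisioners_image_version: "v3.9.14"
```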
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 62d460272..08dfd8284 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -61,3 +61,17 @@
when:
- template_service_broker_remove | default(false) | bool
- template_service_broker_install | default(true) | bool
+
+- name: Ensure that all required vSphere configuration variables are set
+ fail:
+ msg: >
+ When the vSphere cloud provider is configured you must define all of these variables:
+ openshift_cloudprovider_vsphere_username, openshift_cloudprovider_vsphere_password,
+ openshift_cloudprovider_vsphere_host, openshift_cloudprovider_vsphere_datacenter,
+ openshift_cloudprovider_vsphere_datastore
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind == 'vsphere'
+ - ( openshift_cloudprovider_vsphere_username is undefined or openshift_cloudprovider_vsphere_password is undefined or
+ openshift_cloudprovider_vsphere_host is undefined or openshift_cloudprovider_vsphere_datacenter is undefined or
+ openshift_cloudprovider_vsphere_datastore is undefined )
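A minimal sketch of inventory variables that satisfy this check when the vSphere cloud provider is enabled; every value below is hypothetical:

```yaml
# Hypothetical vSphere cloud provider settings; all values are illustrative
openshift_cloudprovider_kind: vsphere
openshift_cloudprovider_vsphere_username: "administrator@vsphere.local"
openshift_cloudprovider_vsphere_password: "changeme"
openshift_cloudprovider_vsphere_host: "vcenter.example.com"
openshift_cloudprovider_vsphere_datacenter: "dc1"
openshift_cloudprovider_vsphere_datastore: "datastore1"
```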
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index f7bd58db3..70a89b0ba 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -73,49 +73,51 @@ Role Variables
This role has the following variables that control the integration of a
GlusterFS cluster into a new or existing OpenShift cluster:
-| Name | Default value | Description |
-|--------------------------------------------------|-------------------------|-----------------------------------------|
-| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
-| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
-| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
-| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
-| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
-| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
-| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
-| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
-| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
-| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
-| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
-| openshift_storage_glusterfs_block_host_vol_create| True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
-| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
-| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
-| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
-| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
-| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster=s3 pod
-| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
-| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
-| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
-| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
-| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
-| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
-| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
-| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
-| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
-| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
-| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
-| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
-| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
+| Name | Default value | Description |
+|--------------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
+| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
+| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
+| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
+| openshift_storage_glusterfs_block_host_vol_create | True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
+| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
+| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_block_storageclass | False | Automatically create a StorageClass for each Gluster Block cluster
+| openshift_storage_glusterfs_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
+| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
+| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod
+| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
+| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
+| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
+| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
+| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
+| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
+| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
+| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
| openshift_storage_glusterfs_heketi_fstab | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
@@ -126,14 +128,16 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | Description |
-|-----------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
+| Name | Default value | Description |
+|-----------------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_registry_block_storageclass | False | It is recommended to not create a StorageClass for Gluster Block clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
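As an example, a sketch of enabling the new glusterblock StorageClass options documented above in an inventory; the values shown are hypothetical and only illustrate the variable names:

```yaml
# Hypothetical sketch enabling the glusterblock StorageClass variables
openshift_storage_glusterfs_block_deploy: True
openshift_storage_glusterfs_block_storageclass: True
openshift_storage_glusterfs_block_storageclass_default: False
openshift_storage_glusterfs_registry_block_storageclass: False
```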
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 4cbe262d2..7e751cc7a 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -14,6 +14,8 @@ openshift_storage_glusterfs_block_version: 'latest'
openshift_storage_glusterfs_block_host_vol_create: True
openshift_storage_glusterfs_block_host_vol_size: 100
openshift_storage_glusterfs_block_host_vol_max: 15
+openshift_storage_glusterfs_block_storageclass: False
+openshift_storage_glusterfs_block_storageclass_default: False
openshift_storage_glusterfs_s3_deploy: True
openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
openshift_storage_glusterfs_s3_version: 'latest'
@@ -61,6 +63,8 @@ openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_gluste
openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+openshift_storage_glusterfs_registry_block_storageclass: False
+openshift_storage_glusterfs_registry_block_storageclass_default: False
openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
@@ -103,3 +107,9 @@ r_openshift_storage_glusterfs_os_firewall_allow:
port: "24008/tcp"
- service: glusterfs_bricks
port: "49152-49251/tcp"
+- service: glusterblockd
+ port: "24010/tcp"
+- service: iscsi-targets
+ port: "3260/tcp"
+- service: rpcbind
+ port: "111/tcp"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 001578406..a5fdae803 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -315,5 +315,31 @@
- include_tasks: glusterblock_deploy.yml
when: glusterfs_block_deploy
+- block:
+ - name: Create heketi block secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-admin-secret-block"
+ type: "gluster.org/glusterblock"
+ force: True
+ contents:
+ - path: key
+ data: "{{ glusterfs_heketi_admin_key }}"
+ when: glusterfs_heketi_admin_key is defined
+ - name: Generate Gluster Block StorageClass file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/gluster-block-storageclass.yml.j2"
+ dest: "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+
+ - name: Create Gluster Block StorageClass
+ oc_obj:
+ state: present
+ kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}-block"
+ files:
+ - "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+ when: glusterfs_block_storageclass
+
- include_tasks: gluster_s3_deploy.yml
when: glusterfs_s3_deploy
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index a374df0ce..92de1b64d 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -17,6 +17,8 @@
glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+ glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_block_storageclass | bool }}"
+ glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_block_storageclass_default | bool }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 544a6f491..befacb04f 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -17,6 +17,8 @@
glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}"
glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}"
glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}"
+ glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_registry_block_storageclass | bool }}"
+ glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_registry_block_storageclass_default | bool }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
index dd4d6e6f7..bcc02e217 100644
--- a/roles/openshift_storage_glusterfs/templates/glusterfs.conf
+++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
@@ -1,4 +1,7 @@
#{{ ansible_managed }}
dm_thin_pool
dm_snapshot
-dm_mirror \ No newline at end of file
+dm_mirror
+#glusterblock
+dm_multipath
+target_core_user
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
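Rendered with native heketi, the default cluster name, and a hypothetical route host, the template above would produce roughly the following StorageClass (the default-class annotation is omitted because the default flag is False; route and namespace values are illustrative):

```yaml
# Hypothetical rendering of gluster-block-storageclass.yml.j2
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-storage-block
provisioner: gluster.org/glusterblock
parameters:
  resturl: "http://heketi-storage.apps.example.com"
  restuser: "admin"
  chapauthenabled: "true"
  hacount: "3"
  restsecretnamespace: "glusterfs"
  restsecretname: "heketi-storage-admin-secret-block"
```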
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}