-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  filter_plugins/oo_filters.py | 70
-rw-r--r--  inventory/byo/hosts.example | 50
-rw-r--r--  inventory/byo/hosts.origin.example | 900
-rw-r--r--  openshift-ansible.spec | 72
-rw-r--r--  playbooks/aws/README.md | 2
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example | 4
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/byo/openshift-management/add_container_provider.yml | 6
-rw-r--r--  playbooks/byo/openshift-management/add_many_container_providers.yml | 36
l---------  playbooks/byo/openshift-management/roles | 1
-rw-r--r--  playbooks/byo/openshift-management/uninstall.yml | 2
-rw-r--r--  playbooks/common/openshift-checks/adhoc.yml | 5
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 6
-rw-r--r--  playbooks/common/openshift-checks/install.yml | 47
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 28
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 10
-rw-r--r--  playbooks/common/openshift-management/add_container_provider.yml | 8
-rw-r--r--  playbooks/common/openshift-management/uninstall.yml | 2
-rw-r--r--  playbooks/common/openshift-node/clean_image.yml | 10
-rw-r--r--  playbooks/common/openshift-node/image_prep.yml | 3
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 2
-rw-r--r--  roles/docker/defaults/main.yml | 11
-rw-r--r--  roles/docker/meta/main.yml | 1
-rw-r--r--  roles/docker/tasks/crio_firewall.yml | 40
-rw-r--r--  roles/docker/tasks/main.yml | 2
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 18
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 2
-rw-r--r--  roles/docker/templates/crio.conf.j2 | 2
-rw-r--r--  roles/etcd/defaults/main.yaml | 4
-rw-r--r--  roles/etcd/tasks/backup.force_new_cluster.yml | 4
-rw-r--r--  roles/etcd/tasks/backup/backup.yml | 16
-rw-r--r--  roles/etcd/tasks/backup/copy.yml | 2
-rw-r--r--  roles/etcd/tasks/backup/unarchive.yml | 2
-rw-r--r--  roles/etcd/tasks/backup/vars.yml | 5
-rw-r--r--  roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml | 21
-rw-r--r--  roles/etcd/tasks/system_container.yml | 54
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 10
-rw-r--r--  roles/kuryr/defaults/main.yaml | 6
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 7
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 2
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 1
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 13
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py | 2
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 23
-rw-r--r--  roles/openshift_logging/README.md | 2
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 27
-rw-r--r--  roles/openshift_logging/filter_plugins/test | 15
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 33
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/files/es_migration.sh | 79
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 3
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_management/README.md | 163
-rw-r--r--  roles/openshift_management/defaults/main.yml | 14
-rw-r--r--  roles/openshift_management/files/examples/container_providers.yml | 22
-rw-r--r--  roles/openshift_management/filter_plugins/oo_management_filters.py | 32
-rw-r--r--  roles/openshift_management/tasks/add_container_provider.yml | 65
-rw-r--r--  roles/openshift_management/tasks/main.yml | 29
-rw-r--r--  roles/openshift_management/tasks/noop.yml | 1
-rw-r--r--  roles/openshift_management/tasks/storage/create_nfs_pvs.yml | 8
-rw-r--r--  roles/openshift_management/tasks/storage/nfs.yml | 31
-rw-r--r--  roles/openshift_management/tasks/storage/nfs_server.yml | 31
-rw-r--r--  roles/openshift_management/tasks/template.yml | 26
-rw-r--r--  roles/openshift_master/tasks/journald.yml | 5
-rw-r--r--  roles/openshift_master/tasks/main.yml | 30
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 5
-rw-r--r--  roles/openshift_nfs/tasks/create_export.yml | 2
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 2
-rw-r--r--  roles/openshift_node_dnsmasq/defaults/main.yml | 5
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 5
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 4
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 3
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 3
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml | 135
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml | 134
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/kernel_modules.yml | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/glusterfs.conf | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 | 36
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 | 49
-rw-r--r--  roles/openshift_version/defaults/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 10
102 files changed, 1467 insertions, 1311 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index b340654f4..99fd69afc 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.178.0 ./
+3.7.0-0.187.0 ./
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 2fbd23450..f9564499d 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1125,6 +1125,73 @@ of items as ['region=infra', 'zone=primary']
return selectors
+def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
+ """Parse the Service Account Secrets list, `sa_secrets`, (as from
+oc_serviceaccount_secret:state=list) and return the name of the secret
+containing the `secret_hint` string. For example, by default this will
+return the name of the secret holding the SA bearer token.
+
+Only provide the 'results' object to this filter. This filter expects
+to receive a list like this:
+
+ [
+ {
+ "name": "management-admin-dockercfg-p31s2"
+ },
+ {
+ "name": "management-admin-token-bnqsh"
+ }
+ ]
+
+
+Returns:
+
+* `secret_name` [string] - The name of the secret matching the
+ `secret_hint` parameter. By default this is the secret holding the
+ SA's bearer token.
+
+Example playbook usage:
+
+Register the return value from oc_serviceaccount_secret and pass
+that result to this filter plugin.
+
+ - name: Get all SA Secrets
+ oc_serviceaccount_secret:
+ state: list
+ service_account: management-admin
+ namespace: management-infra
+ register: sa
+
+ - name: Save the SA bearer token secret name
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+ - name: Get the SA bearer token value
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: management-infra
+ decode: true
+ register: sa_secret
+
+ - name: Print the bearer token value
+ debug:
+ var: sa_secret.results.decoded.token
+
+ """
+ secret_name = None
+
+ for secret in sa_secrets:
+ # each secret is a hash
+ if secret['name'].find(secret_hint) == -1:
+ continue
+ else:
+ secret_name = secret['name']
+ break
+
+ return secret_name
+
+
class FilterModule(object):
""" Custom ansible filter mapping """
@@ -1167,5 +1234,6 @@ class FilterModule(object):
"to_padded_yaml": to_padded_yaml,
"oo_random_word": oo_random_word,
"oo_contains_rule": oo_contains_rule,
- "oo_selector_to_string_list": oo_selector_to_string_list
+ "oo_selector_to_string_list": oo_selector_to_string_list,
+ "oo_filter_sa_secrets": oo_filter_sa_secrets,
}
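
For readers who want to see the new filter in isolation, here is a minimal, self-contained
Python sketch (not part of the change itself) that reproduces the filter's logic and runs it
against the sample `results` list from the docstring above:

    def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
        # Same logic as the filter added above: return the first secret whose
        # name contains the hint substring, or None when nothing matches.
        secret_name = None
        for secret in sa_secrets:
            if secret['name'].find(secret_hint) == -1:
                continue
            secret_name = secret['name']
            break
        return secret_name

    sample = [
        {"name": "management-admin-dockercfg-p31s2"},
        {"name": "management-admin-token-bnqsh"},
    ]
    print(oo_filter_sa_secrets(sample))                # -> management-admin-token-bnqsh
    print(oo_filter_sa_secrets(sample, 'dockercfg'))   # -> management-admin-dockercfg-p31s2
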
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 7c4a7885d..75ddf8e10 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -974,25 +974,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# openshift_upgrade_post_storage_migration_enabled=true
# openshift_upgrade_post_storage_migration_fatal=false
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported, if using a global
-# containerized=true host variable we must set to false.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
-
+######################################################################
# CloudForms/ManageIQ (CFME/MIQ) Configuration
# See the readme for full descriptions and getting started
@@ -1042,6 +1024,17 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima
# setting this variable. Useful for testing specific task files.
#openshift_management_storage_nfs_local_hostname: false
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you have already changed the actual
+# username and password AND are trying to use integration scripts.
+#
+# For example, when adding this cluster as a container provider with
+# playbooks/byo/openshift-management/add_container_provider.yml
+#openshift_management_username: admin
+#openshift_management_password: smartvm
+
# A hash of parameters you want to override or set in the
# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
# your inventory file as a simple hash. Acceptable values are defined
@@ -1050,3 +1043,22 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima
#
# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
#openshift_management_template_parameters: {}
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Containerized load balancer hosts are not yet supported; if using a global
+# containerized=true host variable, set it to false for these hosts.
+[lb]
+ose3-lb-ansible.test.example.com containerized=false
+
+# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
+# However, to ensure that your masters are not burdened with running pods, you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
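
The openshift_management_template_parameters hash documented above is merged over the
template's own defaults. The short Python sketch below illustrates that merge; the parsing
with ast.literal_eval and the default values shown are assumptions for illustration only,
not what the openshift_management role literally does.

    import ast

    # Hypothetical template defaults; the real ones live in miq-template.yaml.
    template_defaults = {
        "APPLICATION_MEM_REQ": "3072Mi",   # assumed value, illustration only
        "APPLICATION_CPU_REQ": "1000m",    # assumed value, illustration only
    }

    # The value exactly as it appears on the inventory line above.
    inventory_value = "{'APPLICATION_MEM_REQ': '512Mi'}"

    overrides = ast.literal_eval(inventory_value)      # parse the dict-style string
    effective = dict(template_defaults, **overrides)   # inventory values win
    print(effective["APPLICATION_MEM_REQ"])            # -> 512Mi
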
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
deleted file mode 100644
index 9d811fcab..000000000
--- a/inventory/byo/hosts.origin.example
+++ /dev/null
@@ -1,900 +0,0 @@
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-nfs
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-# Enable unsupported configurations, things that will yield a partially
-# functioning cluster but would not be supported for production use
-#openshift_enable_unsupported_configurations=false
-
-# SSH user, this user should allow ssh based auth without requiring a
-# password. If using ssh key based auth, then the key should be managed by an
-# ssh agent.
-ansible_ssh_user=root
-
-# If ansible_ssh_user is not root, ansible_become must be set to true and the
-# user must be configured for passwordless sudo
-#ansible_become=yes
-
-# Debug level for all OpenShift components (Defaults to 2)
-debug_level=2
-
-# Specify the deployment type. Valid values are origin and openshift-enterprise.
-openshift_deployment_type=origin
-
-# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
-# rely on the version running on the first master. Works best for containerized installs where we can usually
-# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
-# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
-# release.
-openshift_release=v3.7
-
-# Specify an exact container image tag to install or configure.
-# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.7.0
-
-# Specify an exact rpm version to install or configure.
-# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.7.0
-
-# This enables all the system containers except for docker:
-#openshift_use_system_containers=False
-#
-# But you can choose separately each component that must be a
-# system container:
-#
-#openshift_use_openvswitch_system_container=False
-#openshift_use_node_system_container=False
-#openshift_use_master_system_container=False
-#openshift_use_etcd_system_container=False
-#
-# In either case, system_images_registry must be specified to be able to find the system images
-#system_images_registry="docker.io"
-
-# Install the openshift examples
-#openshift_install_examples=true
-
-# Configure logoutURL in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
-#openshift_master_logout_url=http://example.com
-
-# Configure extensionScripts in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
-
-# Configure extensionStylesheets in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_oauth_template=/path/to/login-template.html
-
-# Configure imagePolicyConfig in the master config
-# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
-#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
-
-# Configure master API rate limits for external clients
-#openshift_master_external_ratelimit_qps=200
-#openshift_master_external_ratelimit_burst=400
-# Configure master API rate limits for loopback clients
-#openshift_master_loopback_ratelimit_qps=300
-#openshift_master_loopback_ratelimit_burst=600
-
-# Docker Configuration
-# Add additional, insecure, and blocked registries to global docker configuration
-# For enterprise deployment types we ensure that registry.access.redhat.com is
-# included if you do not include it
-#openshift_docker_additional_registries=registry.example.com
-#openshift_docker_insecure_registries=registry.example.com
-#openshift_docker_blocked_registries=registry.hacker.com
-# Disable pushing to dockerhub
-#openshift_docker_disable_push_dockerhub=True
-# Use Docker inside a System Container. Note that this is a tech preview and should
-# not be used to upgrade!
-# The following options for docker are ignored:
-# - docker_version
-# - docker_upgrade
-# The following options must not be used
-# - openshift_docker_options
-#openshift_docker_use_system_container=False
-# Instead of using docker, replace it with cri-o
-# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
-# just as container-engine does.
-#openshift_use_crio=False
-# Force the registry to use for the docker/crio system container. By default the registry
-# will be built off of the deployment type and ansible_distribution. Only
-# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
-#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
-# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
-# Default value: "--log-driver=journald"
-#openshift_docker_options="-l warn --ipv6=false"
-
-# Specify exact version of Docker to configure or upgrade to.
-# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.12.1"
-
-# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
-# Uncomment below to disable; for example if your kernel does not support the
-# Docker overlay/overlay2 storage drivers with SELinux enabled.
-#openshift_docker_selinux_enabled=False
-
-# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
-# docker_upgrade=False
-
-# Specify exact version of etcd to configure or upgrade to.
-# etcd_version="3.1.0"
-# Enable etcd debug logging, defaults to false
-# etcd_debug=true
-# Set etcd log levels by package
-# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
-
-# Upgrade Hooks
-#
-# Hooks are available to run custom tasks at various points during a cluster
-# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using
-# absolute paths, if not the path will be treated as relative to the file where the
-# hook is actually used.
-#
-# Tasks to run before each master is upgraded.
-# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
-#
-# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
-# upgrade steps, but before we restart system/services.
-# openshift_master_upgrade_hook=/usr/share/custom/master.yml
-#
-# Tasks to run after each master is upgraded and system/services have been restarted.
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
-
-
-# Alternate image format string, useful if you've got your own registry mirror
-# Configure this setting just on node or master
-#oreg_url_master=example.com/openshift3/ose-${component}:${version}
-#oreg_url_node=example.com/openshift3/ose-${component}:${version}
-# For setting the configuration globally
-#oreg_url=example.com/openshift3/ose-${component}:${version}
-# If oreg_url points to a registry other than registry.access.redhat.com we can
-# modify image streams to point at that registry by setting the following to true
-#openshift_examples_modify_imagestreams=true
-
-# OpenShift repository configuration
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-#openshift_repos_enable_testing=false
-
-# htpasswd auth
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
-# Defining htpasswd users
-#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
-# or
-#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
-
-# Allow all auth
-#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
-
-# LDAP auth
-#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
-#
-# Configure LDAP CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the LDAPPasswordIdentityProvider.
-#
-#openshift_master_ldap_ca=<ca text>
-# or
-#openshift_master_ldap_ca_file=<path to local ca file to use>
-
-# OpenID auth
-#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
-#
-# Configure OpenID CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the OpenIDIdentityProvider.
-#
-#openshift_master_openid_ca=<ca text>
-# or
-#openshift_master_openid_ca_file=<path to local ca file to use>
-
-# Request header auth
-#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
-#
-# Configure request header CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "clientCA"
-# key set within the RequestHeaderIdentityProvider.
-#
-#openshift_master_request_header_ca=<ca text>
-# or
-#openshift_master_request_header_ca_file=<path to local ca file to use>
-
-# CloudForms Management Engine (ManageIQ) App Install
-#
-# Enables installation of MIQ server. Recommended for dedicated
-# clusters only. See roles/openshift_cfme/README.md for instructions
-# and requirements.
-#openshift_cfme_install_app=False
-
-# Cloud Provider Configuration
-#
-# Note: You may make use of environment variables rather than store
-# sensitive configuration within the ansible inventory.
-# For example:
-#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
-#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
-#
-# AWS
-#openshift_cloudprovider_kind=aws
-# Note: IAM profiles may be used instead of storing API credentials on disk.
-#openshift_cloudprovider_aws_access_key=aws_access_key_id
-#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
-#
-# Openstack
-#openshift_cloudprovider_kind=openstack
-#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
-#openshift_cloudprovider_openstack_username=username
-#openshift_cloudprovider_openstack_password=password
-#openshift_cloudprovider_openstack_domain_id=domain_id
-#openshift_cloudprovider_openstack_domain_name=domain_name
-#openshift_cloudprovider_openstack_tenant_id=tenant_id
-#openshift_cloudprovider_openstack_tenant_name=tenant_name
-#openshift_cloudprovider_openstack_region=region
-#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
-#
-# GCE
-#openshift_cloudprovider_kind=gce
-
-# Project Configuration
-#osm_project_request_message=''
-#osm_project_request_template=''
-#osm_mcs_allocator_range='s0:/2'
-#osm_mcs_labels_per_project=5
-#osm_uid_allocator_range='1000000000-1999999999/10000'
-
-# Configure additional projects
-#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
-
-# Enable cockpit
-#osm_use_cockpit=true
-#
-# Set cockpit plugins
-#osm_cockpit_plugins=['cockpit-kubernetes']
-
-# Native high availability cluster method with optional load balancer.
-# If no lb group is defined, the installer assumes that a load balancer has
-# been preconfigured. For installation the value of
-# openshift_master_cluster_hostname must resolve to the load balancer
-# or to one or all of the masters defined in the inventory if no load
-# balancer is present.
-#openshift_master_cluster_method=native
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Pacemaker high availability cluster method.
-# Pacemaker HA environment must be able to self provision the
-# configured VIP. For installation openshift_master_cluster_hostname
-# must resolve to the configured VIP.
-#openshift_master_cluster_method=pacemaker
-#openshift_master_cluster_password=openshift_cluster
-#openshift_master_cluster_vip=192.168.133.25
-#openshift_master_cluster_public_vip=192.168.133.25
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Override the default controller lease ttl
-#osm_controller_lease_ttl=30
-
-# Configure controller arguments
-#osm_controller_args={'resource-quota-sync-period': ['10s']}
-
-# Configure api server arguments
-#osm_api_server_args={'max-requests-inflight': ['400']}
-
-# default subdomain to use for exposed routes
-#openshift_master_default_subdomain=apps.test.example.com
-
-# additional cors origins
-#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
-
-# default project node selector
-#osm_default_node_selector='region=primary'
-
-# Override the default pod eviction timeout
-#openshift_master_pod_eviction_timeout=5m
-
-# Override the default oauth tokenConfig settings:
-# openshift_master_access_token_max_seconds=86400
-# openshift_master_auth_token_max_seconds=500
-
-# Override master servingInfo.maxRequestsInFlight
-#openshift_master_max_requests_inflight=500
-
-# Override master and node servingInfo.minTLSVersion and .cipherSuites
-# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
-# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
-#openshift_master_min_tls_version=VersionTLS12
-#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
-#
-#openshift_node_min_tls_version=VersionTLS12
-#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
-
-# default storage plugin dependencies to install, by default the ceph and
-# glusterfs plugin dependencies will be installed, if available.
-#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
-
-# OpenShift Router Options
-#
-# An OpenShift router will be created during install if there are
-# nodes present with labels matching the default router selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Router selector (optional)
-# Router will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_router_selector='region=infra'
-#
-# Router replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift router selector.
-#openshift_hosted_router_replicas=2
-#
-# Router force subdomain (optional)
-# A router path format to force on all routes used by this router
-# (will ignore the route host value)
-#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
-#
-# Router certificate (optional)
-# Provide local certificate paths which will be configured as the
-# router's default certificate.
-#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
-#
-# Manage the OpenShift Router
-#openshift_hosted_manage_router=true
-#
-# Router sharding support has been added and can be achieved by supplying the correct
-# data to the inventory. The variable to house the data is openshift_hosted_routers
-# and is in the form of a list. If no data is passed then a default router will be
-# created. There are multiple combinations of router sharding. The one described
-# below supports routers on separate nodes.
-#
-#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
-
-# OpenShift Registry Console Options
-# Override the console image prefix for enterprise deployments, not used in origin
-# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
-#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
-# Override image version, defaults to latest for origin, matches the product version for enterprise
-#openshift_cockpit_deployer_version=1.4.1
-
-# Openshift Registry Options
-#
-# An OpenShift registry will be created during install if there are
-# nodes present with labels matching the default registry selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Registry selector (optional)
-# Registry will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_registry_selector='region=infra'
-#
-# Registry replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift registry selector.
-#openshift_hosted_registry_replicas=2
-#
-# Validity of the auto-generated certificate in days (optional)
-#openshift_hosted_registry_cert_expire_days=730
-#
-# Manage the OpenShift Registry
-#openshift_hosted_manage_registry=true
-
-# Registry Storage Options
-#
-# NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/registry"
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/registry"
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_host=nfs.example.com
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# Openstack
-# Volume must already exist.
-#openshift_hosted_registry_storage_kind=openstack
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_openstack_filesystem=ext4
-#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# AWS S3
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_encrypt=false
-#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
-#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Any S3 service (Minio, ExoScale, ...): Basically the same as above
-# but with regionendpoint configured
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_accesskey=access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
-#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Additional CloudFront Options. When using CloudFront all three
-# of the following variables must be defined.
-#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
-#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
-#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
-
-# Metrics deployment
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
-#
-# By default metrics are not automatically deployed, set this to enable them
-#openshift_metrics_install_metrics=true
-#
-# Storage Options
-# If openshift_metrics_storage_kind is unset then metrics will be stored
-# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
-# Storage options A & B currently support only one cassandra pod which is
-# generally enough for up to 1000 pods. Additional volumes can be created
-# manually after the fact and metrics scaled per the docs.
-#
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/metrics"
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/metrics"
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_host=nfs.example.com
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_metrics_storage_kind=dynamic
-#
-# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_metrics/README.md
-#
-# Override metricsPublicURL in the master config for cluster metrics
-# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
-# Currently, you may only alter the hostname portion of the url; altering the
-# `/hawkular/metrics` path will break installation of metrics.
-#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics
-# Configure the prefix and version for the component images
-#openshift_metrics_image_prefix=docker.io/openshift/origin-
-#openshift_metrics_image_version=v3.7.0
-#
-# StorageClass
-# openshift_storageclass_name=gp2
-# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
-#
-
-# Logging deployment
-#
-# Currently logging deployment is disabled by default, enable it by setting this
-#openshift_logging_install_logging=true
-#
-# Logging storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/logging"
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/logging"
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_host=nfs.example.com
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_logging_storage_kind=dynamic
-#
-# Option D - none -- Logging will use emptydir volumes which are destroyed when
-# pods are deleted
-#
-# Other Logging Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_logging/README.md
-#
-# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to kibana.{{ openshift_master_default_subdomain }}
-#openshift_logging_kibana_hostname=logging.apps.example.com
-# Configure the number of elastic search nodes, unless you're using dynamic provisioning
-# this value must be 1
-#openshift_logging_es_cluster_size=1
-# Configure the prefix and version for the component images
-#openshift_logging_image_prefix=docker.io/openshift/origin-
-#openshift_logging_image_version=v3.7.0
-
-# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
-# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
-
-# Disable the OpenShift SDN plugin
-# openshift_use_openshift_sdn=False
-
-# Configure SDN cluster network and kubernetes service CIDR blocks. These
-# network blocks should be private and should not conflict with network blocks
-# in your infrastructure that pods may require access to. Can not be changed
-# after deployment.
-#
-# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
-# 172.17.0.0/16. Your installation will fail and/or your configuration change will
-# cause the Pod SDN or Cluster SDN to fail.
-#
-# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
-# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
-# environment variable located in /etc/sysconfig/docker-network.
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_cluster_network_cidr: clusterNetworkCIDR
-# openshift_portal_net: serviceNetworkCIDR
-# When installing osm_cluster_network_cidr and openshift_portal_net must be set.
-# Sane examples are provided below.
-#osm_cluster_network_cidr=10.128.0.0/14
-#openshift_portal_net=172.30.0.0/16
-
-# ExternalIPNetworkCIDRs controls what values are acceptable for the
-# service external IP field. If empty, no externalIP may be set. It
-# may contain a list of CIDRs which are checked for access. If a CIDR
-# is prefixed with !, IPs in that CIDR will be rejected. Rejections
-# will be applied first, then the IP checked against one of the
-# allowed CIDRs. You should ensure this range does not overlap with
-# your nodes, pods, or service CIDRs for security reasons.
-#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
-
-# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
-# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
-# be assigned. It may contain a single CIDR that will be allocated from. For
-# security reasons, you should ensure that this range does not overlap with
-# the CIDRs reserved for external IPs, nodes, pods, or services.
-#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-
-# Configure number of bits to allocate to each host’s subnet e.g. 9
-# would mean a /23 network on the host.
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_host_subnet_length: hostSubnetLength
-# When installing osm_host_subnet_length must be set. A sane example is provided below.
-#osm_host_subnet_length=9
-
-# Configure master API and console ports.
-#openshift_master_api_port=8443
-#openshift_master_console_port=8443
-
-# set RPM version for debugging purposes
-#openshift_pkg_version=-1.1
-
-# Configure custom ca certificate
-#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
-#
-# NOTE: CA certificate will not be replaced with existing clusters.
-# This option may only be specified when creating a new cluster or
-# when redeploying cluster certificates with the redeploy-certificates
-# playbook.
-
-# Configure custom named certificates (SNI certificates)
-#
-# https://docs.openshift.org/latest/install_config/certificate_customization.html
-#
-# NOTE: openshift_master_named_certificates is cached on masters and is an
-# additive fact, meaning that each run with a different set of certificates
-# will add the newly provided certificates to the cached set of certificates.
-#
-# An optional CA may be specified for each named certificate. CAs will
-# be added to the OpenShift CA bundle which allows for the named
-# certificate to be served for internal cluster communication.
-#
-# If you would like openshift_master_named_certificates to be overwritten with
-# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates=true
-#
-# Provide local certificate paths which will be deployed to masters
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
-#
-# Detected names may be overridden by specifying the "names" key
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
-
-# Session options
-#openshift_master_session_name=ssn
-#openshift_master_session_max_seconds=3600
-
-# An authentication and encryption secret will be generated if secrets
-# are not provided. If provided, openshift_master_session_auth_secrets
-# and openshift_master_encryption_secrets must be equal length.
-#
-# Signing secrets, used to authenticate sessions using
-# HMAC. Recommended to use secrets with 32 or 64 bytes.
-#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-#
-# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
-# characters long, to select AES-128, AES-192, or AES-256.
-#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-
-# configure how often node iptables rules are refreshed
-#openshift_node_iptables_sync_period=5s
-
-# Configure nodeIP in the node config
-# This is needed in cases where node traffic is desired to go over an
-# interface other than the default network interface.
-#openshift_set_node_ip=True
-
-# Force setting of system hostname when configuring OpenShift
-# This works around issues related to installations that do not have valid dns
-# entries for the interfaces attached to the host.
-#openshift_set_hostname=True
-
-# Configure dnsIP in the node config
-#openshift_dns_ip=172.30.0.1
-
-# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
-
-# Configure logrotate scripts
-# See: https://github.com/nickhammond/ansible-logrotate
-#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
-
-# openshift-ansible will wait indefinitely for your input when it detects that the
-# value of openshift_hostname resolves to an IP address not bound to any local
-# interfaces. This mis-configuration is problematic for any pod leveraging host
-# networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
-
-# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
-# in versions >= 3.6
-#openshift_use_dnsmasq=False
-
-# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
-# This is useful for POC environments where DNS may not actually be available yet or to set
-# options like 'strict-order' to alter dnsmasq configuration.
-#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
-
-# Global Proxy Configuration
-# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
-# variables for docker and master services.
-#
-# Hosts in the openshift_no_proxy list will NOT use any globally
-# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
-# (.example.com), and hosts (example.com), and IP addresses.
-#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
-#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
-#openshift_no_proxy='.hosts.example.com,some-host.com'
-#
-# Most environments don't require a proxy between openshift masters, nodes, and
-# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
-# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above instead.
-#
-# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and
-# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
-# variable (above) and set this value to False
-#openshift_generate_no_proxy_hosts=True
-#
-# These options configure the BuildDefaults admission controller which injects
-# configuration into Builds. Proxy related values will default to the global proxy
-# config values. You only need to set these if they differ from the global proxy settings.
-# See BuildDefaults documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=mycorp.com
-#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_no_proxy=mycorp.com
-#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
-#openshift_builddefaults_resources_requests_cpu=100m
-#openshift_builddefaults_resources_requests_memory=256Mi
-#openshift_builddefaults_resources_limits_cpu=1000m
-#openshift_builddefaults_resources_limits_memory=512Mi
-
-# Or you may optionally define your own build defaults configuration serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
-
-# These options configure the BuildOverrides admission controller which injects
-# configuration into Builds.
-# See BuildOverrides documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_buildoverrides_force_pull=true
-#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
-
-# Or you may optionally define your own build overrides configuration serialized as json
-#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
-
-# Enable template service broker by specifying one or more namespaces whose
-# templates will be served by the broker
-#openshift_template_service_broker_namespaces=['openshift']
-
-# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
-#openshift_master_dynamic_provisioning_enabled=False
-
-# Admission plugin config
-#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
-
-# Configure usage of openshift_clock role.
-#openshift_clock_enabled=true
-
-# OpenShift Per-Service Environment Variables
-# Environment variables are added to /etc/sysconfig files for
-# each OpenShift service: node, master (api and controllers).
-# API and controllers environment variables are merged in single
-# master environments.
-#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-
-# Enable API service auditing, available as of 1.3
-#openshift_master_audit_config={"enabled": true}
-#
-# In case you want more advanced setup for the auditlog you can
-# use this line.
-# The directory in "auditFilePath" will be created if it does not
-# exist
-#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
-
-# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
-# by deployment_type=origin
-#openshift_enable_origin_repo=false
-
-# Validity of the auto-generated OpenShift certificates in days.
-# See also openshift_hosted_registry_cert_expire_days above.
-#
-#openshift_ca_cert_expire_days=1825
-#openshift_node_cert_expire_days=730
-#openshift_master_cert_expire_days=730
-
-# Validity of the auto-generated external etcd certificates in days.
-# Controls validity for etcd CA, peer, server and client certificates.
-#
-#etcd_ca_default_days=1825
-#
-# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
-# openshift_master_saconfig_limitsecretreferences=false
-
-# Upgrade Control
-#
-# By default nodes are upgraded in a serial manner one at a time and all failures
-# are fatal, one set of variables for normal nodes, one set of variables for
-# nodes that are part of control plane as the number of hosts may be different
-# in those two groups.
-#openshift_upgrade_nodes_serial=1
-#openshift_upgrade_nodes_max_fail_percentage=0
-#openshift_upgrade_control_plane_nodes_serial=1
-#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
-#
-# You can specify the number of nodes to upgrade at once. We do not currently
-# attempt to verify that you have capacity to drain this many nodes at once
-# so please be careful when specifying these values. You should also verify that
-# the expected number of nodes are all schedulable and ready before starting an
-# upgrade. If it's not possible to drain the requested nodes the upgrade will
-# stall indefinitely until the drain is successful.
-#
-# If you're upgrading more than one node at a time you can specify the maximum
-# percentage of failure within the batch before the upgrade is aborted. Any
-# nodes that do fail are ignored for the rest of the playbook run, and you should
-# take care to investigate the failure and return the node to service so that
-# your cluster returns to full capacity.
-#
-# The failure percentage must exceed this value before the batch aborts; this would fail on two failures
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
-# whereas this would not
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
-#
-# Multiple data migrations take place during an upgrade; if they fail, they will
-# fail the upgrade. You may wish to disable these or make them non-fatal.
-#
-# openshift_upgrade_pre_storage_migration_enabled=true
-# openshift_upgrade_pre_storage_migration_fatal=true
-# openshift_upgrade_post_storage_migration_enabled=true
-# openshift_upgrade_post_storage_migration_fatal=false
-
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported; if a global
-# containerized=true host variable is in use, it must be set to false for these hosts.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 5ca9ac3a9..f1ace9b22 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.178.0%{?dist}
+Release: 0.187.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -21,7 +21,12 @@ Requires: ansible >= 2.3
Requires: python2
Requires: python-six
Requires: tar
-Requires: openshift-ansible-docs = %{version}
+Requires: %{name}-docs = %{version}
+Requires: %{name}-playbooks = %{version}
+Requires: %{name}-roles = %{version}
+Requires: %{name}-filter-plugins = %{version}
+Requires: %{name}-lookup-plugins = %{version}
+Requires: %{name}-callback-plugins = %{version}
Requires: java-1.8.0-openjdk-headless
Requires: httpd-tools
Requires: libselinux-python
@@ -280,6 +285,69 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Oct 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.187.0
+-
+
+* Sun Oct 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.186.0
+-
+
+* Sat Oct 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.185.0
+- bug 1506073. Lower cpu request for logging when it exceeds limit
+ (jcantril@redhat.com)
+- Update the name of the service-catalog binary (staebler@redhat.com)
+- disk_availability check: include submount storage (lmeyer@redhat.com)
+
+* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.184.0
+- cri-o: Set max log size to 50 mb (mrunalp@gmail.com)
+- cri-o: open port 10010 (gscrivan@redhat.com)
+- bug 1435144. Remove unneeded upgrade in openshift_logging role
+ (jcantril@redhat.com)
+- Remove inadvertently committed inventory file (rteague@redhat.com)
+- crio: restorecon /var/lib/containers (smilner@redhat.com)
+- Correct openshift_release regular expression (rteague@redhat.com)
+- crio: Add failed_when to overlay check (smilner@redhat.com)
+- docker: set credentials when using system container (gscrivan@redhat.com)
+- Change dnsmasq to bind-interfaces + except-interfaces (mgugino@redhat.com)
+- Fix CA Bundle passed to service-catalog broker for ansible-service-broker
+ (staebler@redhat.com)
+- Renaming csr to bootstrap for consistency. (kwoodson@redhat.com)
+- Add master config upgrade hook to upgrade-all plays (mgugino@redhat.com)
+- Remove 'Not Started' status from playbook checkpoint (rteague@redhat.com)
+- Force include_role to static for loading openshift_facts module
+ (rteague@redhat.com)
+- Make openshift-ansible depend on all subpackages (sdodson@redhat.com)
+- Refactor health check playbooks (rteague@redhat.com)
+
+* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.183.0
+-
+
+* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.182.0
+- Fixing documentation for the cert_key_path variable name.
+ (kwoodson@redhat.com)
+- Moving removal of unwanted artifacts to image_prep. (kwoodson@redhat.com)
+- Ensure journald persistence directories exist (mgugino@redhat.com)
+- Fix lint (tbielawa@redhat.com)
+- Move add_many_container_providers.yml to playbooks/byo/openshift-management
+ with a noop task include to load filter plugins. (abutcher@redhat.com)
+- Refactor adding multiple container providers (tbielawa@redhat.com)
+- Management Cleanup and Provider Integration (tbielawa@redhat.com)
+
+* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.181.0
+- Fix loop_var warnings during logging install (mgugino@redhat.com)
+- Fix typo and add detailed comments in kuryr (sngchlko@gmail.com)
+
+* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.179.0
+- Remove pause from master service startup (rteague@redhat.com)
+- Change default in prometheus storage type to emptydir (zgalor@redhat.com)
+- Bug 1491636 - honor node selectors (jwozniak@redhat.com)
+- Sync latest imagestreams and templates (sdodson@redhat.com)
+- Remove base package install (mgugino@redhat.com)
+- etcd: remove hacks for the system container (gscrivan@redhat.com)
+- Ensure deployment_subtype is set within openshift_sanitize_inventory.
+ (abutcher@redhat.com)
+- Add installer checkpoint for prometheus (zgalor@redhat.com)
+- Remove unused registry_volume_claim variable (hansmi@vshn.ch)
+
* Wed Oct 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.178.0
- Split prometheus image defaults to prefix and version (zgalor@redhat.com)
- Remove extraneous spaces that yamllint dislikes (staebler@redhat.com)
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index fbab61189..4e5c1017b 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -66,7 +66,7 @@ openshift_pkg_version: # example: -3.7.0
openshift_aws_ssh_key_name: # example: myuser_key
openshift_aws_base_ami: # example: ami-12345678
openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt'
-openshift_aws_iam_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
+openshift_aws_iam_cert_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
```
If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`.
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index aa91363ae..1491fb868 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -116,5 +116,5 @@ openshift_aws_base_ami: # ami-12345678
# custom certificates are required for the ELB
openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt'
-openshift_aws_iam_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
-#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
+openshift_aws_iam_cert_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
+openshift_aws_iam_cert_chain_path: # '/path/to/cert.ca.crt'
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 60fa44c5b..f2e52782b 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -8,5 +8,3 @@
- always
- include: ../../common/openshift-cluster/config.yml
- vars:
- openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-management/add_container_provider.yml b/playbooks/byo/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..3378b5abd
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_container_provider.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-management/add_container_provider.yml
diff --git a/playbooks/byo/openshift-management/add_many_container_providers.yml b/playbooks/byo/openshift-management/add_many_container_providers.yml
new file mode 100644
index 000000000..62fdb11c5
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_many_container_providers.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Ensure the container provider configuration is defined
+ assert:
+ that: container_providers_config is defined
+ msg: |
+ Error: Must provide providers config path. Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command
+
+ - name: Include providers/management configuration
+ include_vars:
+ file: "{{ container_providers_config }}"
+
+ - name: Ensure this cluster is a container provider
+ uri:
+ url: "https://{{ management_server['hostname'] }}/api/providers"
+ body_format: json
+ method: POST
+ user: "{{ management_server['user'] }}"
+ password: "{{ management_server['password'] }}"
+ validate_certs: no
+ # Docs on formatting the BODY of the POST request:
+ # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+ body: "{{ item }}"
+ failed_when: false
+ with_items: "{{ container_providers }}"
+ register: results
+
+ # Include openshift_management for access to filter_plugins.
+ - include_role:
+ name: openshift_management
+ tasks_from: noop
+
+ - name: print each result
+ debug:
+ msg: "{{ results.results | oo_filter_container_providers }}"
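
For reference, a minimal sketch of the vars file passed via `container_providers_config` follows. The play above only reads a `management_server` mapping (hostname, user, password) and a `container_providers` list whose entries are POSTed verbatim to `/api/providers`; every hostname, credential, and provider field below is illustrative, and the authoritative field list is the ManageIQ providers API referenced in the task comment.

```
---
# Hypothetical container_providers.yml, supplied with
#   -e container_providers_config=/path/to/container_providers.yml
management_server:
  hostname: management.example.com
  user: admin
  password: changeme

container_providers:
  # Each list entry becomes the JSON body of one POST to /api/providers.
  - name: ocp-cluster-one
    type: "ManageIQ::Providers::Openshift::ContainerManager"
    hostname: master1.example.com
    port: 8443
```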
diff --git a/playbooks/byo/openshift-management/roles b/playbooks/byo/openshift-management/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-management/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml
index a1fb1cdc4..e95c1c88a 100644
--- a/playbooks/byo/openshift-management/uninstall.yml
+++ b/playbooks/byo/openshift-management/uninstall.yml
@@ -1,4 +1,2 @@
---
-# - include: ../openshift-cluster/initialize_groups.yml
-
- include: ../../common/openshift-management/uninstall.yml
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
index dfcef8435..d0deaeb65 100644
--- a/playbooks/common/openshift-checks/adhoc.yml
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -1,12 +1,13 @@
---
-- name: OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: adhoc
post_tasks:
- - name: Run health checks
+ - name: Run health checks (adhoc)
action: openshift_health_check
args:
checks: '{{ openshift_checks | default([]) }}'
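
Because the play reads `openshift_checks` with an empty default, the checks to run can be supplied with `-e openshift_checks=...` or from a vars file. A hedged sketch (the group_vars path and check selection are only illustrative):

```
# Hypothetical group_vars/OSEv3.yml entry selecting which ad hoc checks run
openshift_checks:
- disk_availability
- memory_availability
- docker_storage
```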
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 21ea785ef..d0921b9d3 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,11 +1,13 @@
---
-- name: Run OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: health
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@health)
+ action: openshift_health_check
args:
checks: ['@health']
diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml
new file mode 100644
index 000000000..6701a2e15
--- /dev/null
+++ b/playbooks/common/openshift-checks/install.yml
@@ -0,0 +1,47 @@
+---
+- name: Health Check Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'In Progress'
+ set_stats:
+ data:
+ installer_phase_health: "In Progress"
+ aggregate: false
+
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: install
+ post_tasks:
+ - name: Run health checks (install) - EL
+ when: ansible_distribution != "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
+ - name: Run health checks (install) - Fedora
+ when: ansible_distribution == "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - docker_image_availability
+
+- name: Health Check Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'Complete'
+ set_stats:
+ data:
+ installer_phase_health: "Complete"
+ aggregate: false
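
With the install-time checks split into their own phase, individual checks can still be waived through the health checker's disable variable, as documented for the openshift_health_checker role; a hedged inventory sketch:

```
# Hypothetical group_vars entry: skip selected install-time checks
openshift_disable_check: disk_availability,docker_storage
```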
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index 88e6f9120..32449d4e4 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,11 +1,13 @@
---
-- name: run OpenShift pre-install checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: pre-install
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@preflight)
+ action: openshift_health_check
args:
checks: ['@preflight']
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 395eb51f1..244787985 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,31 +1,5 @@
---
-# TODO: refactor this into its own include
-# and pass a variable for ctx
-- name: Verify Requirements
- hosts: oo_all_hosts
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: install
- post_tasks:
-
- - name: Verify Requirements - EL
- when: ansible_distribution != "Fedora"
- action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - package_availability
- - package_version
- - docker_image_availability
- - docker_storage
- - name: Verify Requirements - Fedora
- when: ansible_distribution == "Fedora"
- action: openshift_health_check
- args:
- checks:
- - docker_image_availability
+- include: ../openshift-checks/install.yml
- include: ../openshift-etcd/config.yml
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 0f563adb7..91223d368 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -10,6 +10,7 @@
- name: load openshift_facts module
include_role:
name: openshift_facts
+ static: yes
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index e6400ea61..37a5284d5 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,15 +1,4 @@
---
-# openshift_install_base_package_group may be set in a play variable to limit
-# the host groups the base package is installed on. This is currently used
-# for master/control-plane upgrades.
-- name: Set version_install_base_package true on masters and nodes
- hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}"
- tasks:
- - name: Set version_install_base_package true
- set_fact:
- version_install_base_package: True
- when: version_install_base_package is not defined
-
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index f64f0e003..54c85f0fb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -68,7 +68,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 43da5b629..d7cb38d03 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -68,7 +68,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 30e719d8f..bda245fe1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -112,6 +112,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index e9cec9220..6cdea7b84 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -72,7 +72,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 920dc2ffc..dd109cfa9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -116,6 +116,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 27d8515dc..8ab68002d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -76,7 +76,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 81f6dc8a4..f4862e321 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -119,9 +119,9 @@
tasks:
- include: ../cleanup_unused_images.yml
-#TODO: Why doesn't this compose using ./upgrade_control_plane rather than
-# ../upgrade_control_plane?
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index b91bea617..d5a8379d7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -80,7 +80,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 80cda9e21..c2ae5f313 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -17,6 +17,11 @@
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
- name: Open firewall ports for GlusterFS registry nodes
hosts: glusterfs_registry
@@ -26,6 +31,11 @@
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_registry_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
- name: Configure GlusterFS
hosts: oo_first_master
diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..facb3a5b9
--- /dev/null
+++ b/playbooks/common/openshift-management/add_container_provider.yml
@@ -0,0 +1,8 @@
+---
+- name: Add Container Provider to Management
+ hosts: oo_first_master
+ tasks:
+ - name: Run the Management Integration Tasks
+ include_role:
+ name: openshift_management
+ tasks_from: add_container_provider
diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
index 698d93405..9f35cc276 100644
--- a/playbooks/common/openshift-management/uninstall.yml
+++ b/playbooks/common/openshift-management/uninstall.yml
@@ -1,6 +1,6 @@
---
- name: Uninstall CFME
- hosts: masters
+ hosts: masters[0]
tasks:
- name: Run the CFME Uninstall Role Tasks
include_role:
diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml
new file mode 100644
index 000000000..38753d0af
--- /dev/null
+++ b/playbooks/common/openshift-node/clean_image.yml
@@ -0,0 +1,10 @@
+---
+- name: Configure nodes
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - name: Remove any ansible facts created during AMI creation
+ file:
+ path: "/etc/ansible/facts.d/{{ item }}"
+ state: absent
+ with_items:
+ - openshift.fact
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
index 00d167c22..30651a1df 100644
--- a/playbooks/common/openshift-node/image_prep.yml
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -19,3 +19,6 @@
- name: Re-enable excluders
include: enable_excluders.yml
+
+- name: Remove any undesired artifacts from build
+ include: clean_image.yml
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 66c3d9cc4..89a84c4df 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -123,7 +123,7 @@
register: asb_client_secret
- set_fact:
- service_ca_crt: asb_client_secret.results.results.0.data['service-ca.crt']
+ service_ca_crt: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] }}"
# Using oc_obj because oc_service doesn't seem to allow annotations
# TODO: Extend oc_service to allow annotations
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index 1c830cb4e..fe938e52b 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -21,3 +21,14 @@ l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is stri
l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
containers_registries_conf_path: /etc/containers/registries.conf
+
+r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+r_crio_os_firewall_deny: []
+r_crio_os_firewall_allow:
+- service: crio
+ port: 10010/tcp
+
+
+openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}"
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index b773a417c..62b8a2eb5 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: lib_openshift
+- role: lib_os_firewall
diff --git a/roles/docker/tasks/crio_firewall.yml b/roles/docker/tasks/crio_firewall.yml
new file mode 100644
index 000000000..fbd1ff515
--- /dev/null
+++ b/roles/docker/tasks/crio_firewall.yml
@@ -0,0 +1,40 @@
+---
+- when: r_crio_firewall_enabled | bool and not r_crio_use_firewalld | bool
+ block:
+ - name: Add iptables allow rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: add
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_allow }}"
+
+ - name: Remove iptables rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: remove
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_deny }}"
+
+- when: r_crio_firewall_enabled | bool and r_crio_use_firewalld | bool
+ block:
+ - name: Add firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_allow }}"
+
+ - name: Remove firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ immediate: true
+ state: disabled
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_deny }}"
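
If further ports ever need the same handling, the allow list can be overridden from inventory or group_vars. The second entry below is purely illustrative; CRI-O itself only needs 10010/tcp, as defaulted in roles/docker/defaults/main.yml:

```
# Hypothetical group_vars override: entries use the service / "<port>/<proto>"
# shape that the tasks above split on '/'.
r_crio_os_firewall_allow:
- service: crio
  port: 10010/tcp
- service: crio-stream
  port: 10011/tcp
```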
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 5ea73568a..1539af53f 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -35,4 +35,4 @@
include: systemcontainer_crio.yml
when:
- l_use_crio
- - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+ - openshift_docker_is_node_or_master | bool
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 13bbd359e..67ede0d21 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -3,16 +3,16 @@
# TODO: Much of this file is shared with container engine tasks
- set_fact:
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
- when: l2_docker_insecure_registries
+ when: l2_docker_insecure_registries | bool
- set_fact:
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
- when: l2_docker_additional_registries
+ when: l2_docker_additional_registries | bool
- set_fact:
l_crio_registries: "{{ ['docker.io'] }}"
- when: not l2_docker_additional_registries
+ when: not (l2_docker_additional_registries | bool)
- set_fact:
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
- when: l2_docker_additional_registries
+ when: l2_docker_additional_registries | bool
- set_fact:
l_openshift_image_tag: "{{ openshift_image_tag | string }}"
@@ -62,7 +62,7 @@
shell: lsmod | grep overlay
register: l_has_overlay_in_kernel
ignore_errors: yes
-
+ failed_when: false
- when: l_has_overlay_in_kernel.rc != 0
block:
@@ -161,11 +161,19 @@
path: /etc/cni/net.d/
state: directory
+- name: setup firewall for CRI-O
+ include: crio_firewall.yml
+ static: yes
+
- name: Configure the CNI network
template:
dest: /etc/cni/net.d/openshift-sdn.conf
src: 80-openshift-sdn.conf.j2
+- name: Fix SELinux Permissions on /var/lib/containers
+ command: "restorecon -R /var/lib/containers/"
+ changed_when: false
+
- name: Start the CRI-O service
systemd:
name: "cri-o"
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 726e8ada7..aa3b35ddd 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -173,4 +173,6 @@
- set_fact:
docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
+- include: registry_auth.yml
+
- meta: flush_handlers
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index b715c2ffa..93014a80d 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -108,7 +108,7 @@ pids_limit = 1024
# log_size_max is the max limit for the container log size in bytes.
# Negative values indicate that no limit is imposed.
-log_size_max = -1
+log_size_max = 52428800
# The "crio.image" table contains settings pertaining to the
# management of OCI images.
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 78f231416..4b734d4ed 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -13,8 +13,6 @@ r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'h
# etcd server vars
etcd_conf_dir: '/etc/etcd'
-r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
-etcd_system_container_conf_dir: /var/lib/etcd/etc
etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
@@ -54,7 +52,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml
index 24bd0540d..d2e866416 100644
--- a/roles/etcd/tasks/backup.force_new_cluster.yml
+++ b/roles/etcd/tasks/backup.force_new_cluster.yml
@@ -3,10 +3,10 @@
- name: Move content of etcd backup under the etcd data directory
command: >
- mv "{{ l_etcd_backup_dir }}/member" "{{ l_etcd_data_dir }}"
+ mv "{{ l_etcd_backup_dir }}/member" "{{ etcd_data_dir }}"
- name: Set etcd group for the etcd data directory
command: >
- chown -R etcd:etcd "{{ l_etcd_data_dir }}"
+ chown -R etcd:etcd "{{ etcd_data_dir }}"
- include: auxiliary/force_new_cluster.yml
diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml
index ec1a1989c..ca0d29155 100644
--- a/roles/etcd/tasks/backup/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -3,7 +3,7 @@
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ l_etcd_data_dir }} | tail -n 1
+ shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
register: l_avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
@@ -11,7 +11,7 @@
# TODO: replace shell module with command and update later checks
- name: Check current etcd disk usage
- shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
+ shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
register: l_etcd_disk_usage
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
@@ -44,17 +44,17 @@
- r_etcd_common_embedded_etcd | bool
- not l_ostree_booted.stat.exists | bool
-- name: Check selinux label of '{{ l_etcd_data_dir }}'
+- name: Check selinux label of '{{ etcd_data_dir }}'
command: >
- stat -c '%C' {{ l_etcd_data_dir }}
+ stat -c '%C' {{ etcd_data_dir }}
register: l_etcd_selinux_labels
- debug:
msg: "{{ l_etcd_selinux_labels }}"
-- name: Make sure the '{{ l_etcd_data_dir }}' has the proper label
+- name: Make sure the '{{ etcd_data_dir }}' has the proper label
command: >
- chcon -t svirt_sandbox_file_t "{{ l_etcd_data_dir }}"
+ chcon -t svirt_sandbox_file_t "{{ etcd_data_dir }}"
when:
- l_etcd_selinux_labels.rc == 0
- "'svirt_sandbox_file_t' not in l_etcd_selinux_labels.stdout"
@@ -68,12 +68,12 @@
# https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
- name: Check for v3 data store
stat:
- path: "{{ l_etcd_data_dir }}/member/snap/db"
+ path: "{{ etcd_data_dir }}/member/snap/db"
register: l_v3_db
- name: Copy etcd v3 data store
command: >
- cp -a {{ l_etcd_data_dir }}/member/snap/db
+ cp -a {{ etcd_data_dir }}/member/snap/db
{{ l_etcd_backup_dir }}/member/snap/
when: l_v3_db.stat.exists
diff --git a/roles/etcd/tasks/backup/copy.yml b/roles/etcd/tasks/backup/copy.yml
index 16604bae8..967e5ee66 100644
--- a/roles/etcd/tasks/backup/copy.yml
+++ b/roles/etcd/tasks/backup/copy.yml
@@ -2,4 +2,4 @@
- name: Copy etcd backup
copy:
src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz"
- dest: "{{ l_etcd_data_dir }}"
+ dest: "{{ etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/unarchive.yml b/roles/etcd/tasks/backup/unarchive.yml
index 6c75d00a7..a85f533c2 100644
--- a/roles/etcd/tasks/backup/unarchive.yml
+++ b/roles/etcd/tasks/backup/unarchive.yml
@@ -11,4 +11,4 @@
# src: "{{ l_etcd_backup_dir }}.tgz"
# dest: "{{ l_etcd_backup_dir }}"
command: >
- tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ l_etcd_data_dir }}"
+ tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/vars.yml b/roles/etcd/tasks/backup/vars.yml
index 3c009f557..3ffa641b3 100644
--- a/roles/etcd/tasks/backup/vars.yml
+++ b/roles/etcd/tasks/backup/vars.yml
@@ -6,13 +6,10 @@
l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
- set_fact:
- l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
-
-- set_fact:
l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
- set_fact:
l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
- set_fact:
- l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
+ l_etcd_backup_dir: "{{ etcd_data_dir }}/{{ l_backup_dir_name }}"
diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index 26492fb3c..00b8f4a0b 100644
--- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -12,9 +12,6 @@
- "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
- "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
- "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
- - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
- - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
- - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
register: g_etcd_server_cert_stat_result
when: not etcd_certificates_redeploy | default(false) | bool
@@ -141,7 +138,6 @@
state: directory
with_items:
- "{{ etcd_cert_config_dir }}"
- - "{{ etcd_system_container_cert_config_dir }}"
when: etcd_server_certs_missing | bool
- name: Unarchive cert tarball
@@ -176,25 +172,8 @@
state: directory
with_items:
- "{{ etcd_ca_dir }}"
- - "{{ etcd_system_container_cert_config_dir }}/ca"
when: etcd_server_certs_missing | bool
-- name: Unarchive cert tarball for the system container
- unarchive:
- src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ etcd_system_container_cert_config_dir }}"
- when:
- - etcd_server_certs_missing | bool
- - r_etcd_common_etcd_runtime == 'runc'
-
-- name: Unarchive etcd ca cert tarballs for the system container
- unarchive:
- src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_system_container_cert_config_dir }}/ca"
- when:
- - etcd_server_certs_missing | bool
- - r_etcd_common_etcd_runtime == 'runc'
-
- name: Delete temporary directory
local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent
become: no
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 9a6951920..f71d9b551 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,7 +1,4 @@
---
-- set_fact:
- l_etcd_src_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
-
- name: Ensure proxies are in the atomic.conf
include_role:
name: openshift_atomic
@@ -57,36 +54,13 @@
- name: Systemd reload configuration
systemd: name=etcd_container daemon_reload=yes
-- name: Check for previous etcd data store
- stat:
- path: "{{ l_etcd_src_data_dir }}/member/"
- register: src_datastore
-
-- name: Check for etcd system container data store
- stat:
- path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member"
- register: dest_datastore
-
-- name: Ensure that etcd system container data dirs exist
- file: path="{{ item }}" state=directory
- with_items:
- - "{{ r_etcd_common_system_container_host_dir }}/etc"
- - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd"
-
-- name: Copy etcd data store
- command: >
- cp -a {{ l_etcd_src_data_dir }}/member
- {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member
- when:
- - src_datastore.stat.exists
- - not dest_datastore.stat.exists
-
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
image: "{{ openshift.etcd.etcd_image }}"
state: latest
values:
+ - ETCD_DATA_DIR=/var/lib/etcd
- ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
- ETCD_NAME={{ etcd_hostname }}
- ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}
@@ -95,11 +69,21 @@
- ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
- ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
- ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
- - ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- - ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt
- - ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key
- - ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- - ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- - ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
- - ETCD_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_CA_FILE={{ etcd_ca_file }}
+ - ETCD_CERT_FILE={{ etcd_cert_file }}
+ - ETCD_KEY_FILE={{ etcd_key_file }}
+ - ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }}
+ - ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }}
+ - ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
+ - ETCD_TRUSTED_CA_FILE={{ etcd_ca_file }}
+ - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_peer_ca_file }}
+ - 'ADDTL_MOUNTS=,{"type":"bind","source":"/etc/","destination":"/etc/","options":["rbind","rw","rslave"]},{"type":"bind","source":"/var/lib/etcd","destination":"/var/lib/etcd/","options":["rbind","rw","rslave"]}'
+
+- name: Ensure etcd datadir ownership for the system container
+ file:
+ path: "{{ etcd_data_dir }}"
+ state: directory
+ mode: 0700
+ owner: root
+ group: root
+ recurse: True
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 55c44bb84..b17358882 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -72,6 +72,7 @@ class CallbackModule(CallbackBase):
# Set the order of the installer phases
installer_phases = [
'installer_phase_initialize',
+ 'installer_phase_health',
'installer_phase_etcd',
'installer_phase_nfs',
'installer_phase_loadbalancer',
@@ -93,6 +94,10 @@ class CallbackModule(CallbackBase):
'title': 'Initialization',
'playbook': ''
},
+ 'installer_phase_health': {
+ 'title': 'Health Check',
+ 'playbook': 'playbooks/byo/openshift-checks/pre-install.yml'
+ },
'installer_phase_etcd': {
'title': 'etcd Install',
'playbook': 'playbooks/byo/openshift-etcd/config.yml'
@@ -166,11 +171,6 @@ class CallbackModule(CallbackBase):
self._display.display(
'\tThis phase can be restarted by running: {}'.format(
phase_attributes[phase]['playbook']))
- else:
- # Phase was not found in custom stats
- self._display.display(
- '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'),
- color=C.COLOR_SKIP)
self._display.display("", screen_only=True)
diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml
index ff298dda0..af05d80df 100644
--- a/roles/kuryr/defaults/main.yaml
+++ b/roles/kuryr/defaults/main.yaml
@@ -5,10 +5,10 @@ kuryr_config_dir: /etc/kuryr
# Kuryr username
kuryr_openstack_username: kuryr
-# Kuryr username domain
+# Kuryr domain name or id containing user
kuryr_openstack_user_domain_name: default
-# Kuryr username domain
+# Kuryr domain name or id containing project
kuryr_openstack_project_domain_name: default
# Kuryr OpenShift namespace
@@ -31,7 +31,7 @@ cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/
cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz"
cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9"
-# Kuryr ClusterRole definiton
+# Kuryr ClusterRole definition
kuryr_clusterrole:
name: kuryrctl
state: present
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index d319fdd1a..0cb749dcc 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,11 +1,4 @@
---
-- name: Remove any ansible facts created during AMI creation
- file:
- path: "/etc/ansible/facts.d/{{ item }}"
- state: absent
- with_items:
- - openshift.fact
-
- name: fetch newly created instances
ec2_remote_facts:
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index ed9c0ed0b..76aebdcea 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -9,7 +9,7 @@ write_files:
content: |
openshift_group_type: {{ openshift_aws_node_group_type }}
{% if openshift_aws_node_group_type != 'master' %}
-- path: /etc/origin/node/csr_kubeconfig
+- path: /etc/origin/node/bootstrap.kubeconfig
owner: 'root:root'
permissions: '0640'
encoding: b64
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index b3ecd57a6..0c072b64a 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -6,6 +6,5 @@ etcd_ip: "{{ openshift.common.ip }}"
etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
etcd_cert_prefix:
etcd_cert_config_dir: "/etc/etcd"
-etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc
etcd_peer_url_scheme: https
etcd_url_scheme: https
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index 7956559c6..87e6146d4 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -1,6 +1,7 @@
"""Check that there is enough disk space in predefined paths."""
import tempfile
+import os.path
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
@@ -121,11 +122,21 @@ class DiskAvailability(OpenShiftCheck):
return {}
+ def find_ansible_submounts(self, path):
+ """Return a list of ansible_mounts that are below the given path."""
+ base = os.path.join(path, "")
+ return [
+ mount
+ for mount in self.get_var("ansible_mounts")
+ if mount["mount"].startswith(base)
+ ]
+
def free_bytes(self, path):
"""Return the size available in path based on ansible_mounts."""
+ submounts = sum(mnt.get('size_available', 0) for mnt in self.find_ansible_submounts(path))
mount = self.find_ansible_mount(path)
try:
- return mount['size_available']
+ return mount['size_available'] + submounts
except KeyError:
raise OpenShiftCheckException(
'Unable to retrieve disk availability for "{path}".\n'
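
To illustrate the submount change with made-up facts: the free space reported for a recommended path is now the path's own mount plus every mount nested beneath it.

```
# Illustrative ansible_mounts facts (sizes in bytes)
ansible_mounts:
- mount: /var
  size_available: 10737418240    # 10 GiB directly on /var
- mount: /var/lib/docker
  size_available: 21474836480    # 20 GiB on a submount, now also counted
# free_bytes('/var') reports 30 GiB instead of only the 10 GiB on /var itself.
```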
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
index 0558ddf14..6808d8b2f 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_storage.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -14,7 +14,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
"""
name = "docker_storage"
- tags = ["pre-install", "health", "preflight"]
+ tags = ["health", "preflight"]
dependencies = ["python-docker-py"]
storage_drivers = ["devicemapper", "overlay", "overlay2"]
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index 29a325a17..7acdb40ec 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -96,6 +96,24 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
'size_available': 20 * 10**9 + 1,
}],
),
+ (
+ ['oo_masters_to_config'],
+ 0,
+ [{
+ 'mount': '/',
+ 'size_available': 2 * 10**9,
+ }, { # not enough directly on /var
+ 'mount': '/var',
+ 'size_available': 10 * 10**9 + 1,
+ }, {
+ # but subdir mounts add up to enough
+ 'mount': '/var/lib/docker',
+ 'size_available': 20 * 10**9 + 1,
+ }, {
+ 'mount': '/var/lib/origin',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
])
def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts):
task_vars = dict(
@@ -104,9 +122,10 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
ansible_mounts=ansible_mounts,
)
- result = DiskAvailability(fake_execute_module, task_vars).run()
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert not result.get('failed', False)
+ assert not check.failures
@pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 0ea34faf2..6c5bb8693 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -297,6 +297,8 @@ oc delete pod --selector=<ds_selector>
Changelog
---------
+Thu Oct 26, 2017
+- Make the CPU request equal to the limit when the request exceeds the limit
Tue Oct 10, 2017
- Default imagePullPolicy changed from Always to IfNotPresent
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 959573635..e1a5ea726 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -3,6 +3,7 @@
'''
import random
+import re
def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
@@ -17,6 +18,31 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
return dict(kind='emptydir')
+def min_cpu(left, right):
+ '''Return the minimum cpu value of the two values given'''
+    message = "Unable to evaluate whether the {} cpu value '{}' is specified correctly. Expected a whole number, a decimal, or a number followed by M/m"
+ pattern = re.compile(r"^(\d*\.?\d*)([Mm])?$")
+ millis_per_core = 1000
+ if not right:
+ return left
+ m_left = pattern.match(left)
+ if not m_left:
+ raise RuntimeError(message.format("left", left))
+ m_right = pattern.match(right)
+ if not m_right:
+ raise RuntimeError(message.format("right", right))
+ left_value = float(m_left.group(1))
+ right_value = float(m_right.group(1))
+ if m_left.group(2) not in ["M", "m"]:
+ left_value = left_value * millis_per_core
+ if m_right.group(2) not in ["M", "m"]:
+ right_value = right_value * millis_per_core
+ response = left
+ if left_value != min(left_value, right_value):
+ response = right
+ return response
+
+
def walk(source, path, default, delimiter='.'):
     '''Walk the source hash given the path and return the value or default if not found'''
if not isinstance(source, dict):
@@ -87,6 +113,7 @@ class FilterModule(object):
'random_word': random_word,
'entry_from_named_pair': entry_from_named_pair,
'map_from_pairs': map_from_pairs,
+ 'min_cpu': min_cpu,
'es_storage': es_storage,
'serviceaccount_name': serviceaccount_name,
'serviceaccount_namespace': serviceaccount_namespace,
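
A small sketch of the new filter from a playbook's point of view (variable names are made up): the request is lowered to the limit whenever it would exceed it, and an undefined limit leaves the request untouched.

```
# Hypothetical vars demonstrating min_cpu; values may be millicores ("500m")
# or whole/decimal cores ("1", "0.5").
example_lowered: "{{ '500m' | min_cpu('200m') }}"   # -> "200m"
example_kept: "{{ '100m' | min_cpu('1') }}"         # -> "100m"
example_no_limit: "{{ '500m' | min_cpu(none) }}"    # -> "500m"
```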
diff --git a/roles/openshift_logging/filter_plugins/test b/roles/openshift_logging/filter_plugins/test
index 3ad956cca..bac25c012 100644
--- a/roles/openshift_logging/filter_plugins/test
+++ b/roles/openshift_logging/filter_plugins/test
@@ -1,7 +1,22 @@
import unittest
from openshift_logging import walk
+from openshift_logging import min_cpu
class TestFilterMethods(unittest.TestCase):
+
+
+ def test_min_cpu_for_none(self):
+ source = "1000M"
+ self.assertEquals(min_cpu(source, None), "1000M")
+
+ def test_min_cpu_for_millis(self):
+ source = "1"
+ self.assertEquals(min_cpu(source, "0.1"), "0.1")
+
+
+ def test_min_cpu_for_whole(self):
+ source = "120M"
+ self.assertEquals(min_cpu(source, "2"), "120M")
def test_walk_find_key(self):
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 3f705d02c..b98e281a3 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -77,21 +77,23 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
- openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
- _es_containers: "{{item.0.containers}}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
+ openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
+ _es_containers: "{{ outer_item.0.containers}}"
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
- "{{ openshift_logging_facts.elasticsearch.pvcs }}"
- "{{ es_indices }}"
+ loop_control:
+ loop_var: outer_item
when:
- openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
@@ -101,13 +103,15 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
+ loop_control:
+ loop_var: outer_item
- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
@@ -131,8 +135,8 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
@@ -143,8 +147,8 @@
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
- openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
- openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
+ openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -153,13 +157,16 @@
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
openshift_logging_es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards | default(None) }}"
openshift_logging_es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas | default(None) }}"
- _es_containers: "{{item.0.containers}}"
+ _es_containers: "{{ outer_item.0.containers}}"
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
- "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
- "{{ es_ops_indices }}"
+ loop_control:
+ loop_var: outer_item
+
when:
- openshift_logging_use_ops | bool
- openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
@@ -170,7 +177,7 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
@@ -190,6 +197,8 @@
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
+ loop_control:
+ loop_var: outer_item
when:
- openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index b4ddf45d9..fcaf18ed4 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -90,7 +90,7 @@
es_host: "{{ openshift_logging_curator_es_host }}"
es_port: "{{ openshift_logging_curator_es_port }}"
curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
- curator_cpu_request: "{{ openshift_logging_curator_cpu_request }}"
+ curator_cpu_request: "{{ openshift_logging_curator_cpu_request | min_cpu(openshift_logging_curator_cpu_limit | default(none)) }}"
curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
diff --git a/roles/openshift_logging_elasticsearch/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh
deleted file mode 100644
index 339b5a1b2..000000000
--- a/roles/openshift_logging_elasticsearch/files/es_migration.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-CA=${1:-/etc/openshift/logging/ca.crt}
-KEY=${2:-/etc/openshift/logging/system.admin.key}
-CERT=${3:-/etc/openshift/logging/system.admin.crt}
-openshift_logging_es_host=${4:-logging-es}
-openshift_logging_es_port=${5:-9200}
-namespace=${6:-logging}
-
-# for each index in _cat/indices
-# skip indices that begin with . - .kibana, .operations, etc.
-# skip indices that contain a uuid
-# get a list of unique project
-# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
-# we are interested in - the awk will strip that part off
-function get_list_of_indices() {
- curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
- awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
- '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
- sort -u
-}
-
-# for each index in _cat/indices
-# skip indices that begin with . - .kibana, .operations, etc.
-# get a list of unique project.uuid
-# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
-# we are interested in - the awk will strip that part off
-function get_list_of_proj_uuid_indices() {
- curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
- awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
- '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
- sort -u
-}
-
-if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then
- echo "No Elasticsearch pods found running. Cannot update common data model."
- exit 1
-fi
-
-count=$(get_list_of_indices | wc -l)
-if [ $count -eq 0 ]; then
- echo No matching indices found - skipping update_for_uuid
-else
- echo Creating aliases for $count index patterns . . .
- {
- echo '{"actions":['
- get_list_of_indices | \
- while IFS=. read proj ; do
- # e.g. make test.uuid.* an alias of test.* so we can search for
- # /test.uuid.*/_search and get both the test.uuid.* and
- # the test.* indices
- uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
- [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}"
- done
- echo ']}'
- } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
-fi
-
-count=$(get_list_of_proj_uuid_indices | wc -l)
-if [ $count -eq 0 ] ; then
- echo No matching indexes found - skipping update_for_common_data_model
- exit 0
-fi
-
-echo Creating aliases for $count index patterns . . .
-# for each index in _cat/indices
-# skip indices that begin with . - .kibana, .operations, etc.
-# get a list of unique project.uuid
-# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
-# we are interested in - the awk will strip that part off
-{
- echo '{"actions":['
- get_list_of_proj_uuid_indices | \
- while IFS=. read proj uuid ; do
- # e.g. make project.test.uuid.* and alias of test.uuid.* so we can search for
- # /project.test.uuid.*/_search and get both the test.uuid.* and
- # the project.test.uuid.* indices
- echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}"
- done
- echo ']}'
-} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 7aabdc861..e7ef443bd 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -354,7 +354,7 @@
image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}"
- es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request }}"
+ es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request | min_cpu(openshift_logging_elasticsearch_cpu_limit | default(none)) }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}"
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index f56810610..2f89c3f9f 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -154,7 +154,6 @@
path: "{{ generated_certs_dir }}/system.logging.fluentd.crt"
# create Fluentd daemonset
-
# this should change based on the type of fluentd deployment to be done...
# TODO: pass in aggregation configurations
- name: Generate logging-fluentd daemonset definition
@@ -173,7 +172,7 @@
fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
- fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request }}"
+ fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}"
fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"
audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}"
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 809f7a631..8ef8ede9a 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -230,10 +230,10 @@
es_host: "{{ openshift_logging_kibana_es_host }}"
es_port: "{{ openshift_logging_kibana_es_port }}"
kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
- kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request }}"
+ kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request | min_cpu(openshift_logging_kibana_cpu_limit | default(none)) }}"
kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
- kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request }}"
+ kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request | min_cpu(openshift_logging_kibana_proxy_cpu_limit | default(none)) }}"
kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 1b46a7ac3..5b257139e 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -171,7 +171,7 @@
ops_host: "{{ openshift_logging_mux_ops_host }}"
ops_port: "{{ openshift_logging_mux_ops_port }}"
mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
- mux_cpu_request: "{{ openshift_logging_mux_cpu_request }}"
+ mux_cpu_request: "{{ openshift_logging_mux_cpu_request | min_cpu(openshift_logging_mux_cpu_limit | default(none)) }}"
mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 3a71d9211..05ca27913 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -38,6 +38,10 @@ deployment type (`openshift_deployment_type`):
* [Cloud Provider](#cloud-provider)
* [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only)
* [Customization](#customization)
+ * [Container Provider](#container-provider)
+ * [Manually](#manually)
+ * [Automatically](#automatically)
+ * [Multiple Providers](#multiple-providers)
* [Uninstall](#uninstall)
* [Additional Information](#additional-information)
@@ -80,30 +84,10 @@ to there being no databases that require pods.
*Be extra careful* if you are overriding template
parameters. Including parameters not defined in a template **will
-cause errors**.
-
-**Container Provider Integration** - If you want add your container
-platform (OCP/Origin) as a *Container Provider* in CFME/MIQ then you
-must ensure that the infrastructure management hooks are installed.
-
-* During your OCP/Origin install, ensure that you have the
- `openshift_use_manageiq` parameter set to `true` in your inventory
- at install time. This will create a `management-infra` project and a
- service account user.
-* After CFME/MIQ is installed, obtain the `management-admin` service
- account token and copy it somewhere safe.
-
-```bash
-$ oc serviceaccounts get-token -n management-infra management-admin
-eyJhuGdiOiJSUzI1NiIsInR5dCI6IkpXVCJ9.eyJpd9MiOiJrbWJldm5lbGVzL9NldnZpY2VhY2NvbW50Iiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9uYW1ld9BhY2UiOiJtYW5hZ2VtZW50LWluZnJhIiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9zZWNyZXQuumFtZSI6Im1humFnZW1lunQtYWRtyW4tbG9rZW4tdDBnOTAiLCJrbWJldm5lbGVzLmlvL9NldnZpY2VhY2NvbW50L9NldnZpY2UtYWNju9VubC5uYW1lIjoiuWFuYWbluWVubC1hZG1puiIsImt1YmVyumV0ZXMuyW8vd2VybmljZWFjY291unQvd2VybmljZS1hY2NvbW50LnVpZCI6IjRiZDM2MWQ1LWE1NDAtMTFlNy04YzI5LTUyNTQwMDliMmNkZCIsInN1YiI6InN5d9RluTpzZXJ2yWNlYWNju9VubDptYW5hZ2VtZW50LWluZnJhOm1humFnZW1lunQtYWRtyW4ifQ.B6sZLGD9O4vBu9MHwiG-C_4iEwjBXb7Af8BPw-LNlujDmHhOnQ-Oo4QxQKyj9edynfmDy2yutUyJ2Mm9HfDGWg4C9xhWImHoq6Nl7T5_9djkeGKkK7Ejvg4fA-IkrzEsZeQuluBvXnE6wvP0LCjUo_dx4pPyZJyp46teV9NqKQeDzeysjlMCyqp6AK6-Lj8ILG8YA6d_97HlzL_EgFBLAu0lBSn-uC_9J0gLysqBtK6TI0nExfhv9Bm1_5bdHEbKHPW7xIlYlI9AgmyTyhsQ6SoQWtL2khBjkG9TlPBq9wYJj9bzqgVZlqEfICZxgtXO7sYyuoje4y8lo0YQ0kZmig
-```
-
-* In the CFME/MIQ web interface, navigate to `Compute` →
- `Containers` → `Providers` and select `⚙ Configuration` → `⊕
- Add a new Containers Provider`
-
-*See the [upstream documentation](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/index.html#containers-providers) for additional information.*
-
+cause errors**. If you do receive an error during the `Ensure the CFME
+App is created` task, we recommend running the
+[uninstall scripts](#uninstall) before running the installer again.
# Requirements
@@ -140,11 +124,13 @@ used in your Ansible inventory to control the behavior of this
installer.
-| Variable | Required | Default | Description |
-|------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
-| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
+| Variable | Required | Default | Description |
+|------------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
+| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
| `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. |
-| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| `openshift_management_username`                       | **No**   | `admin`                        | Default management username. Changing this value **does not change the username**. Only change this value if you have already changed the username and are running integration scripts (such as the [add container provider](#container-provider) script) |
+| `openshift_management_password`                       | **No**   | `smartvm`                      | Default management password. Changing this value **does not change the password**. Only change this value if you have already changed the password and are running integration scripts (such as the [add container provider](#container-provider) script) |
| **PRODUCT CHOICE** | | | | |
| `openshift_management_app_template` | **No** | `miq-template` | The project flavor to install. Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> |
| **STORAGE CLASSES** | | | | |
@@ -268,6 +254,9 @@ openshift_management_app_template=cfme-template-ext-db
openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}
```
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
# Limitations
This release is the first OpenShift CFME release in the OCP 3.7
@@ -318,6 +307,9 @@ inventory. The following keys are required:
* `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`*
* `DATABASE_NAME`
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
Your inventory would contain a line similar to this:
```ini
@@ -453,6 +445,116 @@ hash. This applies to **CloudForms** installations as well:
[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml),
[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml).
+# Container Provider
+
+There are two methods for enabling container provider integration. You
+can manually add OCP/Origin as a container provider, or you can try
+the playbooks included with this role.
+
+## Manually
+
+See the online documentation for steps to manually add your cluster as
+a container provider:
+
+* [Container Providers](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/#containers-providers)
+
+## Automatically
+
+Automated container provider integration can be accomplished using a
+playbook included with this role.
+
+This playbook will:
+
+1. Gather the necessary authentication secrets
+1. Find the public routes to the Management app and the cluster API
+1. Make a REST call to add this cluster as a container provider
+
+
+```
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+```
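+
+The playbook authenticates to the management service with
+`openshift_management_username` and `openshift_management_password`.
+If you have already changed those credentials in the application, a
+minimal inventory sketch of the overrides the playbook consumes
+(variable names and the shipped defaults are taken from this role's
+defaults; setting them does **not** change the actual credentials):
+
+```yaml
+# Only needed if the management login was already changed in the app.
+openshift_management_username: admin
+openshift_management_password: smartvm
+```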
+
+## Multiple Providers
+
+As well as providing playbooks to integrate your *current* container
+platform into the management service, this role includes a **tech
+preview** script which allows you to add multiple container platforms
+as container providers in any arbitrary MIQ/CFME server.
+
+Using the multiple-provider script requires manual configuration and
+setting an `EXTRA_VARS` parameter on the command-line.
+
+
+1. Copy the
+ [container_providers.yml](files/examples/container_providers.yml)
+ example somewhere, such as `/tmp/cp.yml`
+1. If you changed your CFME/MIQ username or password, update the
+   `hostname`, `user`, and `password` parameters under the
+   `management_server` key in your copy of the `container_providers.yml` file
+1. Fill in an entry under the `container_providers` key for *each* OCP
+ or Origin cluster you want to add as container providers
+
+**Parameters Which MUST Be Configured:**
+
+* `auth_key` - This is the token of a service account which has admin capabilities on the cluster.
+* `hostname` - This is the hostname that points to the cluster API. Each container provider must have a unique hostname.
+* `name` - This is the name of the cluster as displayed in the management server container providers overview. This must be unique.
+
+*Note*: You can obtain the `auth_key` bearer token from your clusters
+ with this command: `oc serviceaccounts get-token -n management-infra
+ management-admin`
+
+**Parameters Which MAY Be Configured:**
+
+* `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443`
+* `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time.
+
+
+Let's see an example describing the following scenario:
+
+* You copied `files/examples/container_providers.yml` to `/tmp/cp.yml`
+* You're adding two OCP clusters
+* Your management server runs on `mgmt.example.com`
+
+You would customize `/tmp/cp.yml` as such:
+
+```yaml
+---
+container_providers:
+ - connection_configurations:
+ - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "ocp-prod.example.com"
+ name: OCP Production
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+ - connection_configurations:
+ - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "ocp-test.example.com"
+ name: OCP Testing
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+ hostname: "mgmt.example.com"
+ user: admin
+ password: b3tt3r_p4SSw0rd
+```
+
+Then you will run the many-container-providers integration script. You
+**must** provide the path to the container providers configuration
+file as an `EXTRA_VARS` parameter to `ansible-playbook`. Use the `-e`
+(or `--extra-vars`) parameter to set `container_providers_config` to
+the config file path.
+
+```
+$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
+ playbooks/byo/openshift-management/add_many_container_providers.yml
+```
+
+Afterwards you will find two new container providers in your
+management service. Navigate to `Compute` → `Containers` → `Providers`
+to see an overview.
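+
+If you prefer to verify from the command line instead of the web
+console, one option is to query the same `/api/providers` endpoint
+the playbooks POST to. A minimal ad-hoc task sketch (assumes the
+default `admin`/`smartvm` login and a self-signed certificate on the
+management route; adjust the hostname and credentials to your
+environment):
+
+```yaml
+- name: List registered container providers (verification sketch)
+  uri:
+    url: "https://mgmt.example.com/api/providers"
+    method: GET
+    user: admin
+    password: smartvm
+    validate_certs: no
+    return_content: yes
+  register: provider_list
+
+# Each registered provider appears as an entry under 'resources'.
+- debug:
+    msg: "{{ provider_list.json.resources | default([]) }}"
+```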
# Uninstall
@@ -461,6 +563,11 @@ installation:
* `playbooks/byo/openshift-management/uninstall.yml`
+NFS export definitions and data stored on NFS exports are not
+automatically removed. You are urged to manually erase any data from
+old application or database deployments before attempting to
+initialize a new deployment.
+
# Additional Information
The upstream project,
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index ebb56313f..8ba65b386 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -77,6 +77,20 @@ openshift_management_storage_nfs_base_dir: /exports
openshift_management_storage_nfs_local_hostname: false
######################################################################
+# DEFAULT ACCOUNT INFORMATION
+######################################################################
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# name and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+openshift_management_username: admin
+openshift_management_password: smartvm
+
+######################################################################
# SCAFFOLDING - These are parameters we pre-seed that a user may or
# may not set later
######################################################################
diff --git a/roles/openshift_management/files/examples/container_providers.yml b/roles/openshift_management/files/examples/container_providers.yml
new file mode 100644
index 000000000..661f62e4d
--- /dev/null
+++ b/roles/openshift_management/files/examples/container_providers.yml
@@ -0,0 +1,22 @@
+---
+container_providers:
+ - connection_configurations:
+ - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "OCP/Origin cluster hostname (providing API access)"
+ name: openshift-management
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+# Copy and update for as many OCP or Origin providers as you want to
+# add to your management service
+ # - connection_configurations:
+ # - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+ # endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ # hostname: "OCP/Origin cluster hostname (providing API access)"
+ # name: openshift-management
+ # port: 8443
+ # type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+ hostname: "Management server hostname (providing API access)"
+ user: admin
+ password: smartvm
diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py
new file mode 100644
index 000000000..3b7013d9a
--- /dev/null
+++ b/roles/openshift_management/filter_plugins/oo_management_filters.py
@@ -0,0 +1,32 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+Filter methods for the management role
+"""
+
+
+def oo_filter_container_providers(results):
+ """results - the result from posting the API calls for adding new
+providers"""
+ all_results = []
+ for result in results:
+ if 'results' in result['json']:
+ # We got an OK response
+ res = result['json']['results'][0]
+ all_results.append("Provider '{}' - Added successfully".format(res['name']))
+ elif 'error' in result['json']:
+ # This was a problem
+ all_results.append("Provider '{}' - Failed to add. Message: {}".format(
+ result['item']['name'], result['json']['error']['message']))
+ return all_results
+
+
+class FilterModule(object):
+ """ Custom ansible filter mapping """
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ """ returns a mapping of filters to methods """
+ return {
+ "oo_filter_container_providers": oo_filter_container_providers,
+ }
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
new file mode 100644
index 000000000..383e6edb5
--- /dev/null
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -0,0 +1,65 @@
+---
+- name: Ensure lib_openshift modules are available
+ include_role:
+ role: lib_openshift
+
+- name: Ensure OpenShift facts module is available
+ include_role:
+ role: openshift_facts
+
+- name: Ensure OpenShift facts are loaded
+ openshift_facts:
+
+- name: Ensure the management SA Secrets are read
+ oc_serviceaccount_secret:
+ state: list
+ service_account: management-admin
+ namespace: management-infra
+ register: sa
+
+- name: Ensure the management SA bearer token is identified
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+- name: Ensure the SA bearer token value is read
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: management-infra
+ decode: true
+ no_log: True
+ register: sa_secret
+
+- name: Ensure the SA bearer token value is saved
+ set_fact:
+ management_bearer_token: "{{ sa_secret.results.decoded.token }}"
+
+- name: Ensure we have the public route to the management service
+ oc_route:
+ state: list
+ name: httpd
+ namespace: openshift-management
+ register: route
+
+- name: Ensure the management service route is saved
+ set_fact:
+ management_route: "{{ route.results.0.spec.host }}"
+
+- name: Ensure this cluster is a container provider
+ uri:
+ url: "https://{{ management_route }}/api/providers"
+ body_format: json
+ method: POST
+ user: "{{ openshift_management_username }}"
+ password: "{{ openshift_management_password }}"
+ validate_certs: no
+ # Docs on formatting the BODY of the POST request:
+ # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+ body:
+ connection_configurations:
+ - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "{{ openshift.master.cluster_public_hostname }}"
+ name: "{{ openshift_management_project }}"
+ port: "{{ openshift.master.api_port }}"
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index 86c4d0010..9be923a57 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -2,23 +2,33 @@
######################################################################)
# Users, projects, and privileges
-- name: Run pre-install CFME validation checks
+- name: Run pre-install Management validation checks
include: validate.yml
-- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists"
+# This creates a service account allowing Container Provider
+# integration (managing OCP/Origin via MIQ/Management)
+- name: Enable Container Provider Integration
+ include_role:
+ role: openshift_manageiq
+
+- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"
oc_project:
state: present
name: "{{ openshift_management_project }}"
display_name: "{{ openshift_management_project_description }}"
-- name: Create and Authorize CFME Accounts
+- name: Create and Authorize Management Accounts
include: accounts.yml
######################################################################
# STORAGE - Initialize basic storage class
+- name: Determine the correct NFS host if required
+ include: storage/nfs_server.yml
+ when: openshift_management_storage_class in ['nfs', 'nfs_external']
+
#---------------------------------------------------------------------
# * nfs - set up NFS shares on the first master for a proof of concept
-- name: Create required NFS exports for CFME app storage
+- name: Create required NFS exports for Management app storage
include: storage/nfs.yml
when: openshift_management_storage_class == 'nfs'
@@ -45,7 +55,7 @@
######################################################################
# APPLICATION TEMPLATE
-- name: Install the CFME app and PV templates
+- name: Install the Management app and PV templates
include: template.yml
######################################################################
@@ -71,9 +81,16 @@
when:
- openshift_management_app_template in ['miq-template', 'cfme-template']
-- name: Ensure the CFME App is created
+- name: Ensure the Management App is created
oc_process:
namespace: "{{ openshift_management_project }}"
template_name: "{{ openshift_management_template_name }}"
create: True
params: "{{ openshift_management_template_parameters }}"
+
+- name: Wait for the app to come up. May take several minutes, 30s check intervals, 10m max
+ command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}"
+ register: app_seeding_logs
+ until: app_seeding_logs.stdout.find('Server starting complete') != -1
+ delay: 30
+ retries: 20
diff --git a/roles/openshift_management/tasks/noop.yml b/roles/openshift_management/tasks/noop.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_management/tasks/noop.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
index 31c845725..d1b9a8d5c 100644
--- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
+++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
@@ -26,7 +26,7 @@
when:
- openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined
-- name: Check if the CFME App PV has been created
+- name: Check if the Management App PV has been created
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -34,7 +34,7 @@
name: "{{ openshift_management_flavor_short }}-app"
register: miq_app_pv_check
-- name: Check if the CFME DB PV has been created
+- name: Check if the Management DB PV has been created
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -44,7 +44,7 @@
when:
- openshift_management_app_template in ['miq-template', 'cfme-template']
-- name: Ensure the CFME App PV is created
+- name: Ensure the Management App PV is created
oc_process:
namespace: "{{ openshift_management_project }}"
template_name: "{{ openshift_management_flavor }}-app-pv"
@@ -55,7 +55,7 @@
NFS_HOST: "{{ openshift_management_nfs_server }}"
when: miq_app_pv_check.results.results == [{}]
-- name: Ensure the CFME DB PV is created
+- name: Ensure the Management DB PV is created
oc_process:
namespace: "{{ openshift_management_project }}"
template_name: "{{ openshift_management_flavor }}-db-pv"
diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml
index 696808328..94e11137c 100644
--- a/roles/openshift_management/tasks/storage/nfs.yml
+++ b/roles/openshift_management/tasks/storage/nfs.yml
@@ -2,37 +2,6 @@
# Tasks to statically provision NFS volumes
# Include if not using dynamic volume provisioning
-- name: Ensure we save the local NFS server if one is provided
- set_fact:
- openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
- when:
- - openshift_management_storage_nfs_local_hostname is defined
- - openshift_management_storage_nfs_local_hostname != False
- - openshift_management_storage_class == "nfs"
-
-- name: Ensure we save the local NFS server
- set_fact:
- openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
- when:
- - openshift_management_nfs_server is not defined
- - openshift_management_storage_class == "nfs"
-
-- name: Ensure we save the external NFS server
- set_fact:
- openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
- when:
- - openshift_management_storage_class == "nfs_external"
-
-- name: Failed NFS server detection
- assert:
- that:
- - openshift_management_nfs_server is defined
- msg: |
- "Unable to detect an NFS server. The 'nfs_external'
- openshift_management_storage_class option requires that you set
- openshift_management_storage_nfs_external_hostname. NFS hosts detected
- for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}"
-
- name: Setting up NFS storage
block:
- name: Include the NFS Setup role tasks
diff --git a/roles/openshift_management/tasks/storage/nfs_server.yml b/roles/openshift_management/tasks/storage/nfs_server.yml
new file mode 100644
index 000000000..96a742c83
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/nfs_server.yml
@@ -0,0 +1,31 @@
+---
+- name: Ensure we save the local NFS server if one is provided
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
+ when:
+ - openshift_management_storage_nfs_local_hostname is defined
+ - openshift_management_storage_nfs_local_hostname != False
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the local NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
+ when:
+ - openshift_management_nfs_server is not defined
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the external NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
+ when:
+ - openshift_management_storage_class == "nfs_external"
+
+- name: Failed NFS server detection
+ assert:
+ that:
+ - openshift_management_nfs_server is defined
+ msg: |
+ "Unable to detect an NFS server. The 'nfs_external'
+ openshift_management_storage_class option requires that you set
+ openshift_management_storage_nfs_external_hostname. NFS hosts detected
+ for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}"
diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml
index 299158ac4..9f97cdcb9 100644
--- a/roles/openshift_management/tasks/template.yml
+++ b/roles/openshift_management/tasks/template.yml
@@ -15,7 +15,7 @@
# STANDARD PODIFIED DATABASE TEMPLATE
- when: openshift_management_app_template in ['miq-template', 'cfme-template']
block:
- - name: Check if the CFME Server template has been created already
+ - name: Check if the Management Server template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -25,12 +25,12 @@
- when: miq_server_check.results.results == [{}]
block:
- - name: Copy over CFME Server template
+ - name: Copy over Management Server template
copy:
src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME Server Template is created
+ - name: Ensure Management Server Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}"
@@ -41,9 +41,9 @@
######################################################################
# EXTERNAL DATABASE TEMPLATE
-- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template']
+- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
block:
- - name: Check if the CFME Ext-DB Server template has been created already
+ - name: Check if the Management Ext-DB Server template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -53,12 +53,12 @@
- when: miq_ext_db_server_check.results.results == [{}]
block:
- - name: Copy over CFME Ext-DB Server template
+ - name: Copy over Management Ext-DB Server template
copy:
src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME Ext-DB Server Template is created
+ - name: Ensure Management Ext-DB Server Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}-ext-db"
@@ -74,7 +74,7 @@
# Begin conditional PV template creations
# Required for the application server
-- name: Check if the CFME App PV template has been created already
+- name: Check if the Management App PV template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -84,12 +84,12 @@
- when: miq_app_pv_check.results.results == [{}]
block:
- - name: Copy over CFME App PV template
+ - name: Copy over Management App PV template
copy:
src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME App PV Template is created
+ - name: Ensure Management App PV Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}-app-pv"
@@ -103,7 +103,7 @@
# Required for database if the installation is fully podified
- when: openshift_management_app_template in ['miq-template', 'cfme-template']
block:
- - name: Check if the CFME DB PV template has been created already
+ - name: Check if the Management DB PV template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -113,12 +113,12 @@
- when: miq_db_pv_check.results.results == [{}]
block:
- - name: Copy over CFME DB PV template
+ - name: Copy over Management DB PV template
copy:
src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME DB PV Template is created
+ - name: Ensure Management DB PV Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}-db-pv"
diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml
index f79955e95..e2edd5ef4 100644
--- a/roles/openshift_master/tasks/journald.yml
+++ b/roles/openshift_master/tasks/journald.yml
@@ -3,6 +3,11 @@
stat: path=/etc/systemd/journald.conf
register: journald_conf_file
+- name: Create journald persistence directories
+ file:
+ path: /var/log/journal
+ state: directory
+
- name: Update journald setup
replace:
dest: /etc/systemd/journald.conf
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index d0bc79c0c..48b34c578 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -297,14 +297,13 @@
- openshift.master.cluster_method == 'native'
- master_api_service_status_changed | bool
-- name: Start and enable master controller on first master
+- name: Start and enable master controller service
systemd:
name: "{{ openshift.common.service_type }}-master-controllers"
enabled: yes
state: started
when:
- openshift.master.cluster_method == 'native'
- - inventory_hostname == openshift_master_hosts[0]
register: l_start_result
until: not l_start_result | failed
retries: 1
@@ -315,31 +314,8 @@
when:
- l_start_result | failed
-- name: Wait for master controller service to start on first master
- pause:
- seconds: 15
- when:
- - openshift.master.cluster_method == 'native'
-
-- name: Start and enable master controller on all masters
- systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
- enabled: yes
- state: started
- when:
- - openshift.master.cluster_method == 'native'
- - inventory_hostname != openshift_master_hosts[0]
- register: l_start_result
- until: not l_start_result | failed
- retries: 1
- delay: 60
-
-- name: Dump logs from master-controllers if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
- when:
- - l_start_result | failed
-
-- set_fact:
+- name: Set fact master_controllers_service_status_changed
+ set_fact:
master_controllers_service_status_changed: "{{ l_start_result | changed }}"
when:
- openshift.master.cluster_method == 'native'
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 40775571f..a1a0bfaa9 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -179,6 +179,11 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% if openshift.common.version_gte_3_7 | bool %}
+ clusterNetworks:
+ - cidr: {{ openshift.master.sdn_cluster_network_cidr }}
+ hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% endif %}
{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}
{% endif %}
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
index 39323904f..b0b888d56 100644
--- a/roles/openshift_nfs/tasks/create_export.yml
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -12,7 +12,7 @@
# l_nfs_export_name: Name of sub-directory of the export
# l_nfs_options: Mount Options
-- name: Ensure CFME App NFS export directory exists
+- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists"
file:
path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}"
state: directory
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index 8c03f6c41..2deb005da 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -25,7 +25,7 @@
state: "{{ item.state | default('present') }}"
with_items:
# add the kubeconfig
- - line: "KUBECONFIG=/etc/origin/node/csr_kubeconfig"
+ - line: "KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig"
regexp: "^KUBECONFIG=.*"
# remove the config file. This comes from openshift_facts
- regexp: "^CONFIG_FILE=.*"
diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml
index eae832fcf..ebcff46b5 100644
--- a/roles/openshift_node_dnsmasq/defaults/main.yml
+++ b/roles/openshift_node_dnsmasq/defaults/main.yml
@@ -1,2 +1,7 @@
---
openshift_node_dnsmasq_install_network_manager_hook: true
+
+# lo must always be present in this list or dnsmasq will conflict with
+# the node's dns service.
+openshift_node_dnsmasq_except_interfaces:
+- lo
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index ef3ba2880..5c9601277 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -4,4 +4,7 @@ no-negcache
max-cache-ttl=1
enable-dbus
bind-interfaces
-listen-address={{ openshift.node.dns_ip }}
+{% for interface in openshift_node_dnsmasq_except_interfaces %}
+except-interface={{ interface }}
+{% endfor %}
+# End of config
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 74c1a51a8..a6c168bc7 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -23,6 +23,8 @@
# TODO: once this is well-documented, add deprecation notice if using old name.
deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
+ openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
- name: Abort when deployment type is invalid
# this variable is required; complain early and clearly if it is invalid.
@@ -45,7 +47,7 @@
- name: Abort when openshift_release is invalid
when:
- openshift_release is defined
- - not openshift_release | match('\d+(\.\d+){1,3}$')
+ - not openshift_release | match('^\d+(\.\d+){1,3}$')
fail:
msg: |-
openshift_release is "{{ openshift_release }}" which is not a valid version string.
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index 5d5352c1c..0e5bb7230 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -24,6 +24,7 @@ spec:
{% endfor %}
containers:
- args:
+ - apiserver
- --storage-type
- etcd
- --secure-port
@@ -45,7 +46,7 @@ spec:
- --feature-gates
- OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
- command: ["/usr/bin/apiserver"]
+ command: ["/usr/bin/service-catalog"]
imagePullPolicy: Always
name: apiserver
ports:
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index 2272cbb44..e5e5f6b50 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -29,6 +29,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
args:
+ - controller-manager
- -v
- "5"
- --leader-election-namespace
@@ -38,7 +39,7 @@ spec:
- --feature-gates
- OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
- command: ["/usr/bin/controller-manager"]
+ command: ["/usr/bin/service-catalog"]
imagePullPolicy: Always
name: controller-manager
ports:
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml
new file mode 100644
index 000000000..7b705c2d4
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml
@@ -0,0 +1,135 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
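
To make the name wiring concrete: with the default CLUSTER_NAME of glusterfs, the objects in this template reference one another through derived names. A sketch of just the rendered Service, obtained by substituting the defaults above (other parameters left untouched):

    # Rendered Service for CLUSTER_NAME=glusterfs (sketch only; annotations
    # omitted). The selector matches the pod label set by the DeploymentConfig
    # in the same template.
    kind: Service
    apiVersion: v1
    metadata:
      name: heketi-glusterfs
      labels:
        glusterfs: heketi-glusterfs-service
    spec:
      ports:
      - name: heketi
        port: 8080
        targetPort: 8080
      selector:
        glusterfs: heketi-glusterfs-pod
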
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 074904bec..54a6dd7c3 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,6 +1,6 @@
---
- name: Create heketi DB volume
- command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
register: setup_storage
- name: Copy heketi-storage list
diff --git a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
new file mode 100644
index 000000000..030fa81c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
@@ -0,0 +1,12 @@
+---
+- name: Ensure device mapper modules loaded
+ template:
+ src: glusterfs.conf
+ dest: /etc/modules-load.d/glusterfs.conf
+ register: km
+
+- name: Load kernel modules
+ systemd:
+ name: systemd-modules-load.service
+ state: restarted
+ when: km | changed
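
The task pair above writes the module list to /etc/modules-load.d and restarts systemd-modules-load to apply it. A minimal alternative sketch, not how the role does it, that loads the same modules directly with Ansible's modprobe module, using the module names from the glusterfs.conf template added below:

    # Sketch only: immediate loading of the same device mapper modules.
    - name: Load device mapper modules
      modprobe:
        name: "{{ item }}"
        state: present
      with_items:
        - dm_thin_pool
        - dm_snapshot
        - dm_mirror

Restarting systemd-modules-load, as the role's task does, also loads the modules immediately while keeping /etc/modules-load.d as the single source of truth across reboots.
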
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
new file mode 100644
index 000000000..dd4d6e6f7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
@@ -0,0 +1,4 @@
+#{{ ansible_managed }}
+dm_thin_pool
+dm_snapshot
+dm_mirror
\ No newline at end of file
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
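
For illustration, assuming glusterfs_name is "registry" and two hypothetical nodes whose glusterfs_ip values are 192.0.2.10 and 192.0.2.11, the template above renders roughly to:

    # Sketch of rendered output; the name and addresses are examples only.
    apiVersion: v1
    kind: Endpoints
    metadata:
      name: glusterfs-registry-endpoints
    subsets:
    - addresses:
      - ip: 192.0.2.10
      - ip: 192.0.2.11
      ports:
      - port: 1
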
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..454e84aaf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1beta1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
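
As an example of the resturl branch above: with glusterfs_name set to "storage", a native heketi deployment whose route resolves to a hypothetical hostname, and glusterfs_heketi_admin_key defined, the template renders roughly to:

    # Sketch of rendered output; the route hostname and namespace are examples.
    apiVersion: storage.k8s.io/v1beta1
    kind: StorageClass
    metadata:
      name: glusterfs-storage
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://heketi-storage.example.com"
      restuser: "admin"
      secretNamespace: "glusterfs"
      secretName: "heketi-storage-admin-secret"
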
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ }
+ }
+}
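
Only the executor settings in the template above are templated; the rest of the file is static. Hypothetical inventory values that would feed the ssh branch (the variable names are the ones referenced in the template; the values here are examples only):

    # Example values only; the glusterfs_heketi_* names come from the template above.
    glusterfs_heketi_executor: ssh
    glusterfs_heketi_ssh_port: 22
    glusterfs_heketi_ssh_user: root
    glusterfs_heketi_ssh_sudo: false
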
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
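
The topology template above is driven entirely by per-host inventory variables. A sketch of the variables it reads for one member of glusterfs_nodes, for example in a host_vars file; the names come from the template, the values are illustrative, and only glusterfs_devices has no fallback:

    # Example host variables for a GlusterFS node; values are illustrative.
    glusterfs_cluster: 1
    glusterfs_hostname: node1.example.com
    glusterfs_ip: 192.0.2.10
    glusterfs_zone: 1
    glusterfs_devices:
      - /dev/xvdc
      - /dev/xvdd
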
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 53d10f1f8..01a1a7472 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,3 +1,2 @@
---
openshift_protect_installed_version: True
-version_install_base_package: False
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index f4e9ff43a..1c8b9046c 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,16 +5,6 @@
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
-# This is only needed on masters and nodes; version_install_base_package
-# should be set by a play externally.
-- name: Install the base package for versioning
- package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when:
- - not is_containerized | bool
- - version_install_base_package | bool
-
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.