author    Scott Dodson <sdodson@redhat.com>  2017-10-18 07:47:40 -0400
committer GitHub <noreply@github.com>        2017-10-18 07:47:40 -0400
commit    5ff1e1bec64ea99830003a7f7d44de0ac1e65a57 (patch)
tree      2b8d7ea06be949aadab22b7a09db35263ad7c28b /inventory
parent    e007bc8a9b397331f008586dcc3b2ee7d29978ad (diff)
parent    418b742c365ef8dce4e14f9486ea658495029df3 (diff)
Merge pull request #5459 from zgalor/nfs_fix
Fix prometheus role nfs
Diffstat (limited to 'inventory')
-rw-r--r--  inventory/byo/hosts.example          65
-rw-r--r--  inventory/byo/hosts.origin.example  900
2 files changed, 965 insertions, 0 deletions
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 436135bcf..499a9d8e7 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -615,6 +615,71 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
#openshift_logging_image_version=3.7.0
+# Prometheus deployment
+#
+# Prometheus deployment is currently disabled by default; enable it by setting this:
+#openshift_hosted_prometheus_deploy=true
+#
+# Prometheus storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_host=nfs.example.com
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_host=nfs.example.com
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_host=nfs.example.com
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#
+# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
+# which are destroyed when pods are deleted
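+#
+# As a worked example of Option A (a minimal sketch; the [nfs] hostname is
+# an assumption), the following would create the volume at path
+# "/exports/prometheus" on the [nfs] group member:
+#
+# [OSEv3:vars]
+# openshift_hosted_prometheus_deploy=true
+# openshift_prometheus_storage_kind=nfs
+# openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+# openshift_prometheus_storage_nfs_directory=/exports
+# openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
+# openshift_prometheus_storage_volume_name=prometheus
+# openshift_prometheus_storage_volume_size=10Gi
+#
+# [nfs]
+# nfs.example.com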
+
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
new file mode 100644
index 000000000..9d811fcab
--- /dev/null
+++ b/inventory/byo/hosts.origin.example
@@ -0,0 +1,900 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+nfs
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# Enable unsupported configurations, things that will yield a partially
+# functioning cluster but would not be supported for production use
+#openshift_enable_unsupported_configurations=false
+
+# SSH user; this user should allow ssh-based auth without requiring a
+# password. If using ssh-key-based auth, the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_become=yes
+
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
+# Specify the deployment type. Valid values are origin and openshift-enterprise.
+openshift_deployment_type=origin
+
+# Specify the generic release of OpenShift to install. This is used mainly during
+# installation, after which we rely on the version running on the first master.
+# Works best for containerized installs where we can usually use this to look up
+# the latest exact version of the container images, which is the tag actually used
+# to configure the cluster. For RPM installations we just verify that the version
+# detected in your configured repos matches this release.
+openshift_release=v3.7
+
+# Specify an exact container image tag to install or configure.
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_image_tag=v3.7.0
+
+# Specify an exact rpm version to install or configure.
+# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_pkg_version=-3.7.0
+
+# This enables all the system containers except for docker:
+#openshift_use_system_containers=False
+#
+# Alternatively, you can choose individually which components must run as
+# system containers:
+#
+#openshift_use_openvswitch_system_container=False
+#openshift_use_node_system_container=False
+#openshift_use_master_system_container=False
+#openshift_use_etcd_system_container=False
+#
+# In either case, system_images_registry must be specified to be able to find the system images
+#system_images_registry="docker.io"
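+#
+# For example, to run only the node and etcd components as system containers
+# pulled from docker.io (a minimal sketch using the variables above):
+#openshift_use_node_system_container=True
+#openshift_use_etcd_system_container=True
+#system_images_registry="docker.io"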
+
+# Install the openshift examples
+#openshift_install_examples=true
+
+# Configure logoutURL in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+#openshift_master_logout_url=http://example.com
+
+# Configure extensionScripts in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
+
+# Configure extensionStylesheets in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_oauth_template=/path/to/login-template.html
+
+# Configure imagePolicyConfig in the master config
+# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
+#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+
+# Configure master API rate limits for external clients
+#openshift_master_external_ratelimit_qps=200
+#openshift_master_external_ratelimit_burst=400
+# Configure master API rate limits for loopback clients
+#openshift_master_loopback_ratelimit_qps=300
+#openshift_master_loopback_ratelimit_burst=600
+
+# Docker Configuration
+# Add additional, insecure, and blocked registries to global docker configuration
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it
+#openshift_docker_additional_registries=registry.example.com
+#openshift_docker_insecure_registries=registry.example.com
+#openshift_docker_blocked_registries=registry.hacker.com
+# Disable pushing to dockerhub
+#openshift_docker_disable_push_dockerhub=True
+# Use Docker inside a System Container. Note that this is a tech preview and should
+# not be used to upgrade!
+# The following options for docker are ignored:
+# - docker_version
+# - docker_upgrade
+# The following options must not be used
+# - openshift_docker_options
+#openshift_docker_use_system_container=False
+# Instead of using docker, replace it with cri-o
+# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its
+# override, just as container-engine does.
+#openshift_use_crio=False
+# Force the registry used for the docker/crio system container. By default the
+# registry is derived from the deployment type and ansible_distribution. Only
+# use this option if you are sure you know what you are doing!
+#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
+#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
+# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=journald"
+#openshift_docker_options="-l warn --ipv6=false"
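+# With the example above, the docker unit would end up with roughly
+# OPTIONS='-l warn --ipv6=false' in /etc/sysconfig/docker (a sketch; the
+# role may merge in additional defaults such as the log driver).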
+
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.12.1"
+
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
+# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
+# docker_upgrade=False
+
+# Specify exact version of etcd to configure or upgrade to.
+# etcd_version="3.1.0"
+# Enable etcd debug logging, defaults to false
+# etcd_debug=true
+# Set etcd log levels by package
+# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
+
+# Upgrade Hooks
+#
+# Hooks are available to run custom tasks at various points during a cluster
+# upgrade. Each hook should point to a file with Ansible tasks defined. Absolute
+# paths are suggested; otherwise the path will be treated as relative to the file
+# where the hook is actually used.
+#
+# Tasks to run before each master is upgraded.
+# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
+#
+# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
+# upgrade steps, but before we restart system/services.
+# openshift_master_upgrade_hook=/usr/share/custom/master.yml
+#
+# Tasks to run after each master is upgraded and system/services have been restarted.
+# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
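+#
+# A hook file contains a plain list of Ansible tasks, e.g. a minimal sketch
+# of /usr/share/custom/pre_master.yml (the path and task are illustrative):
+#
+# - name: Wait for operator confirmation before upgrading this master
+#   pause:
+#     prompt: "About to upgrade {{ inventory_hostname }}, press Enter to continue"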
+
+
+# Alternate image format string, useful if you've got your own registry mirror.
+# To configure this setting on only the node or master:
+#oreg_url_master=example.com/openshift3/ose-${component}:${version}
+#oreg_url_node=example.com/openshift3/ose-${component}:${version}
+# For setting the configuration globally
+#oreg_url=example.com/openshift3/ose-${component}:${version}
+# If oreg_url points to a registry other than registry.access.redhat.com we can
+# modify image streams to point at that registry by setting the following to true
+#openshift_examples_modify_imagestreams=true
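+#
+# For example, with oreg_url=example.com/openshift3/ose-${component}:${version}
+# and openshift_image_tag=v3.7.0, the router image would resolve to
+# example.com/openshift3/ose-haproxy-router:v3.7.0 (a sketch of the substitution).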
+
+# OpenShift repository configuration
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+#openshift_repos_enable_testing=false
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
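+#
+# Pre-hashed passwords can be generated with the Apache htpasswd utility,
+# e.g. (an illustrative user and password):
+#   htpasswd -nb user1 changeme
+# and the hash from the resulting "user:hash" line pasted into
+# openshift_master_htpasswd_users above.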
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+#
+# Configure LDAP CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the LDAPPasswordIdentityProvider.
+#
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# OpenID auth
+#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
+#
+# Configure OpenID CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the OpenIDIdentityProvider.
+#
+#openshift_master_openid_ca=<ca text>
+# or
+#openshift_master_openid_ca_file=<path to local ca file to use>
+
+# Request header auth
+#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
+#
+# Configure request header CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "clientCA"
+# key set within the RequestHeaderIdentityProvider.
+#
+#openshift_master_request_header_ca=<ca text>
+# or
+#openshift_master_request_header_ca_file=<path to local ca file to use>
+
+# CloudForms Management Engine (ManageIQ) App Install
+#
+# Enables installation of MIQ server. Recommended for dedicated
+# clusters only. See roles/openshift_cfme/README.md for instructions
+# and requirements.
+#openshift_cfme_install_app=False
+
+# Cloud Provider Configuration
+#
+# Note: You may make use of environment variables rather than storing
+# sensitive configuration within the Ansible inventory.
+# For example:
+#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
+#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
+#
+# AWS
+#openshift_cloudprovider_kind=aws
+# Note: IAM profiles may be used instead of storing API credentials on disk.
+#openshift_cloudprovider_aws_access_key=aws_access_key_id
+#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
+#
+# Openstack
+#openshift_cloudprovider_kind=openstack
+#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
+#openshift_cloudprovider_openstack_username=username
+#openshift_cloudprovider_openstack_password=password
+#openshift_cloudprovider_openstack_domain_id=domain_id
+#openshift_cloudprovider_openstack_domain_name=domain_name
+#openshift_cloudprovider_openstack_tenant_id=tenant_id
+#openshift_cloudprovider_openstack_tenant_name=tenant_name
+#openshift_cloudprovider_openstack_region=region
+#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
+#
+# GCE
+#openshift_cloudprovider_kind=gce
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure additional projects
+#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer has
+# been preconfigured. For installation the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
+
+# Configure controller arguments
+#osm_controller_args={'resource-quota-sync-period': ['10s']}
+
+# Configure api server arguments
+#osm_api_server_args={'max-requests-inflight': ['400']}
+
+# default subdomain to use for exposed routes
+#openshift_master_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# Override the default pod eviction timeout
+#openshift_master_pod_eviction_timeout=5m
+
+# Override the default oauth tokenConfig settings:
+# openshift_master_access_token_max_seconds=86400
+# openshift_master_auth_token_max_seconds=500
+
+# Override master servingInfo.maxRequestsInFlight
+#openshift_master_max_requests_inflight=500
+
+# Override master and node servingInfo.minTLSVersion and .cipherSuites
+# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
+# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
+#openshift_master_min_tls_version=VersionTLS12
+#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+#
+#openshift_node_min_tls_version=VersionTLS12
+#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+
+# Default storage plugin dependencies to install; by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
+
+# OpenShift Router Options
+#
+# An OpenShift router will be created during install if there are
+# nodes present with labels matching the default router selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Router selector (optional)
+# Router will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_router_selector='region=infra'
+#
+# Router replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift router selector.
+#openshift_hosted_router_replicas=2
+#
+# Router force subdomain (optional)
+# A router path format to force on all routes used by this router
+# (will ignore the route host value)
+#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
+#
+# Router certificate (optional)
+# Provide local certificate paths which will be configured as the
+# router's default certificate.
+#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
+#
+# Manage the OpenShift Router
+#openshift_hosted_manage_router=true
+#
+# Router sharding support has been added and can be achieved by supplying the correct
+# data to the inventory. The variable to house the data is openshift_hosted_routers
+# and is in the form of a list. If no data is passed then a default router will be
+# created. There are multiple combinations of router sharding. The one described
+# below supports routers on separate nodes.
+#
+#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
+
+# OpenShift Registry Console Options
+# Override the console image prefix for enterprise deployments, not used in origin
+# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
+#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
+# Override image version, defaults to latest for origin, matches the product version for enterprise
+#openshift_cockpit_deployer_version=1.4.1
+
+# OpenShift Registry Options
+#
+# An OpenShift registry will be created during install if there are
+# nodes present with labels matching the default registry selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Registry selector (optional)
+# Registry will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_registry_selector='region=infra'
+#
+# Registry replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift registry selector.
+#openshift_hosted_registry_replicas=2
+#
+# Validity of the auto-generated certificate in days (optional)
+#openshift_hosted_registry_cert_expire_days=730
+#
+# Manage the OpenShift Registry
+#openshift_hosted_manage_registry=true
+
+# Registry Storage Options
+#
+# NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_host=nfs.example.com
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# Openstack
+# Volume must already exist.
+#openshift_hosted_registry_storage_kind=openstack
+#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_registry_storage_openstack_filesystem=ext4
+#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# AWS S3
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
+#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
+#
+# Any S3 service (Minio, ExoScale, ...): Basically the same as above
+# but with regionendpoint configured
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
+#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
+#
+# Additional CloudFront Options. When using CloudFront all three
+# of the following variables must be defined.
+#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
+#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
+#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
+
+# Metrics deployment
+# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+#
+# By default metrics are not automatically deployed, set this to enable them
+#openshift_metrics_install_metrics=true
+#
+# Storage Options
+# If openshift_metrics_storage_kind is unset then metrics will be stored
+# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
+# Storage options A & B currently support only one cassandra pod which is
+# generally enough for up to 1000 pods. Additional volumes can be created
+# manually after the fact and metrics scaled per the docs.
+#
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/metrics"
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/metrics"
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_host=nfs.example.com
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_metrics_storage_kind=dynamic
+#
+# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_metrics/README.md
+#
+# Override metricsPublicURL in the master config for cluster metrics
+# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
+# Currently, you may only alter the hostname portion of the URL; altering the
+# `/hawkular/metrics` path will break the installation of metrics.
+#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics
+# Configure the prefix and version for the component images
+#openshift_metrics_image_prefix=docker.io/openshift/origin-
+#openshift_metrics_image_version=v3.7.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
+#
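+# For example, to provision metrics storage dynamically from an encrypted
+# gp2 class (a sketch, assuming an AWS cloud provider is configured above):
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'true'}
+# openshift_metrics_storage_kind=dynamic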
+
+# Logging deployment
+#
+# Logging deployment is currently disabled by default; enable it by setting this:
+#openshift_logging_install_logging=true
+#
+# Logging storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/logging"
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/logging"
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_host=nfs.example.com
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_logging_storage_kind=dynamic
+#
+# Option D - none -- Logging will use emptydir volumes which are destroyed when
+# pods are deleted
+#
+# Other Logging Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_logging/README.md
+#
+# Configure loggingPublicURL in the master config for aggregate logging, defaults
+# to kibana.{{ openshift_master_default_subdomain }}
+#openshift_logging_kibana_hostname=logging.apps.example.com
+# Configure the number of Elasticsearch nodes; unless you're using dynamic
+# provisioning this value must be 1
+#openshift_logging_es_cluster_size=1
+# Configure the prefix and version for the component images
+#openshift_logging_image_prefix=docker.io/openshift/origin-
+#openshift_logging_image_version=v3.7.0
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# Configure SDN cluster network and kubernetes service CIDR blocks. These
+# network blocks should be private and should not conflict with network blocks
+# in your infrastructure that pods may require access to. Can not be changed
+# after deployment.
+#
+# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
+# 172.17.0.0/16. Your installation will fail and/or your configuration change will
+# cause the Pod SDN or Cluster SDN to fail.
+#
+# WORKAROUND : If you must use an overlapping subnet, you can configure a
+# non-conflicting docker0 CIDR range by adding '--bip=192.168.2.1/24' to the
+# DOCKER_NETWORK_OPTIONS environment variable located in /etc/sysconfig/docker-network.
+# When upgrading or scaling up the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_cluster_network_cidr: clusterNetworkCIDR
+# openshift_portal_net: serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
+#osm_cluster_network_cidr=10.128.0.0/14
+#openshift_portal_net=172.30.0.0/16
+
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
+# Configure number of bits to allocate to each host’s subnet e.g. 9
+# would mean a /23 network on the host.
+# When upgrading or scaling up the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_host_subnet_length: hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
+#osm_host_subnet_length=9
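+#
+# Worked example: with osm_cluster_network_cidr=10.128.0.0/14 and
+# osm_host_subnet_length=9, each host gets a /23 (32 - 9 = 23), so the /14
+# cluster network can hold 2^(23-14) = 512 host subnets of 510 usable pod IPs each.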
+
+# Configure master API and console ports.
+#openshift_master_api_port=8443
+#openshift_master_console_port=8443
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-1.1
+
+# Configure custom ca certificate
+#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
+#
+# NOTE: The CA certificate will not be replaced on existing clusters.
+# This option may only be specified when creating a new cluster or
+# when redeploying cluster certificates with the redeploy-certificates
+# playbook.
+
+# Configure custom named certificates (SNI certificates)
+#
+# https://docs.openshift.org/latest/install_config/certificate_customization.html
+#
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+#
+# An optional CA may be specified for each named certificate. CAs will
+# be added to the OpenShift CA bundle which allows for the named
+# certificate to be served for internal cluster communication.
+#
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates=true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be of equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
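+#
+# For example, a matched pair of equal-length lists using 32-character values
+# to select AES-256 (placeholders only, generate your own distinct secrets):
+#openshift_master_session_auth_secrets=['REPLACE+WITH+A+32+BYTE+SECRET+00']
+#openshift_master_session_encryption_secrets=['REPLACE+WITH+A+32+BYTE+SECRET+00']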
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
+# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
+
+# Configure logrotate scripts
+# See: https://github.com/nickhammond/ansible-logrotate
+#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
+
+# openshift-ansible will wait indefinitely for your input when it detects that the
+# value of openshift_hostname resolves to an IP address not bound to any local
+# interfaces. This misconfiguration is problematic for any pod leveraging host
+# networking and liveness or readiness probes.
+# Setting this variable to true will override that check.
+#openshift_override_hostname_check=true
+
+# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
+# in versions >= 3.6
+#openshift_use_dnsmasq=False
+
+# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
+# This is useful for POC environments where DNS may not actually be available yet or to set
+# options like 'strict-order' to alter dnsmasq configuration.
+#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
+
+# Global Proxy Configuration
+# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
+# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts, so those hostnames are automatically added to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above instead.
+#
+# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and
+# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# configuration into Builds. Proxy related values will default to the global proxy
+# config values. You only need to set these if they differ from the global proxy settings.
+# See BuildDefaults documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_no_proxy=mycorp.com
+#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_no_proxy=mycorp.com
+#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_builddefaults_resources_requests_cpu=100m
+#openshift_builddefaults_resources_requests_memory=256Mi
+#openshift_builddefaults_resources_limits_cpu=1000m
+#openshift_builddefaults_resources_limits_memory=512Mi
+
+# Or you may optionally define your own build defaults configuration serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
+
+# These options configure the BuildOverrides admission controller which injects
+# configuration into Builds.
+# See BuildOverrides documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_buildoverrides_force_pull=true
+#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+
+# Or you may optionally define your own build overrides configuration serialized as json
+#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
+
+# Enable template service broker by specifying one or more namespaces whose
+# templates will be served by the broker
+#openshift_template_service_broker_namespaces=['openshift']
+
+# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
+#openshift_master_dynamic_provisioning_enabled=False
+
+# Admission plugin config
+#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
+
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
+
+# OpenShift Per-Service Environment Variables
+# Environment variables are added to /etc/sysconfig files for
+# each OpenShift service: node, master (api and controllers).
+# API and controllers environment variables are merged in single
+# master environments.
+#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
+
+# Enable API service auditing, available as of 1.3
+#openshift_master_audit_config={"enabled": true}
+#
+# If you want a more advanced setup for the audit log you can
+# use this line.
+# The directory in "auditFilePath" will be created if it does not
+# exist.
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
+
+# Enable origin repos that point at the CentOS PaaS SIG, defaults to true, only used
+# by deployment_type=origin
+#openshift_enable_origin_repo=false
+
+# Validity of the auto-generated OpenShift certificates in days.
+# See also openshift_hosted_registry_cert_expire_days above.
+#
+#openshift_ca_cert_expire_days=1825
+#openshift_node_cert_expire_days=730
+#openshift_master_cert_expire_days=730
+
+# Validity of the auto-generated external etcd certificates in days.
+# Controls validity for etcd CA, peer, server and client certificates.
+#
+#etcd_ca_default_days=1825
+#
+# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
+# openshift_master_saconfig_limitsecretreferences=false
+
+# Upgrade Control
+#
+# By default nodes are upgraded serially, one at a time, and all failures are
+# fatal. There is one set of variables for normal nodes and another for nodes
+# that are part of the control plane, as the number of hosts may differ between
+# those two groups.
+#openshift_upgrade_nodes_serial=1
+#openshift_upgrade_nodes_max_fail_percentage=0
+#openshift_upgrade_control_plane_nodes_serial=1
+#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
+#
+# You can specify the number of nodes to upgrade at once. We do not currently
+# attempt to verify that you have capacity to drain this many nodes at once
+# so please be careful when specifying these values. You should also verify that
+# the expected number of nodes are all schedulable and ready before starting an
+# upgrade. If it's not possible to drain the requested nodes the upgrade will
+# stall indefinitely until the drain is successful.
+#
+# If you're upgrading more than one node at a time you can specify the maximum
+# percentage of failure within the batch before the upgrade is aborted. Any
+# nodes that do fail are ignored for the rest of the playbook run and you should
+# take care to investigate the failure and return the node to service so that
+# your cluster can return to full capacity.
+#
+# The observed failure percentage must exceed the configured value for the
+# upgrade to abort. This combination would abort on two failures (50% > 49%):
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
+# whereas this one would not (50% is not greater than 50%):
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+#
+# Multiple data migrations take place during the upgrade, and if they fail they
+# will fail the upgrade. You may wish to disable these or make them non-fatal.
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal=true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal=false
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Containerized load balancer hosts are not yet supported; if using a global
+# containerized=true host variable, it must be set to false for these hosts.
+[lb]
+ose3-lb-ansible.test.example.com containerized=false
+
+# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"