author     Dan Yocum <dyocum@redhat.com>   2018-01-24 17:15:26 -0500
committer  Dan Yocum <dyocum@redhat.com>   2018-01-24 17:15:26 -0500
commit     8cbd53ed64f1819b8f4defbf196140a1692c5a42 (patch)
tree       7dd61610e8ea6ae373e12dbb1c894047b8feac0f
parent     c2de775c80b8daa629af514f24118f704c521c18 (diff)
parent     0b57e113dbf16b5ac6f01859c9c4ab2aee594f87 (diff)
Merge branch 'master' into rm_origin-components
-rw-r--r--  .dockerignore  2
-rw-r--r--  images/installer/Dockerfile  6
-rw-r--r--  images/installer/Dockerfile.rhel7  2
-rwxr-xr-x  images/installer/root/usr/local/bin/entrypoint-gcp  51
-rwxr-xr-x  images/installer/root/usr/local/bin/user_setup  2
-rw-r--r--  inventory/.gitignore  1
-rw-r--r--  inventory/dynamic/gcp/README.md  1
-rw-r--r--  inventory/dynamic/gcp/ansible.cfg  45
-rw-r--r--  inventory/dynamic/gcp/group_vars/all/00_defaults.yml  42
-rwxr-xr-x  inventory/dynamic/gcp/hosts.py  408
-rwxr-xr-x  inventory/dynamic/gcp/hosts.sh  15
-rw-r--r--  inventory/dynamic/gcp/none  1
-rw-r--r--  inventory/dynamic/injected/README.md  3
-rw-r--r--  inventory/hosts.example  6
-rw-r--r--  openshift-ansible.spec  6
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml  25
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml  27
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml  4
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example  18
-rw-r--r--  playbooks/common/private/components.yml  38
-rw-r--r--  playbooks/common/private/control_plane.yml  34
-rw-r--r--  playbooks/container-runtime/private/build_container_groups.yml  4
-rw-r--r--  playbooks/container-runtime/private/config.yml  4
-rw-r--r--  playbooks/container-runtime/private/setup_storage.yml  4
-rw-r--r--  playbooks/deploy_cluster.yml  37
-rw-r--r--  playbooks/gcp/openshift-cluster/build_base_image.yml  162
-rw-r--r--  playbooks/gcp/openshift-cluster/build_image.yml  106
-rw-r--r--  playbooks/gcp/openshift-cluster/deprovision.yml  10
-rw-r--r--  playbooks/gcp/openshift-cluster/install.yml  33
-rw-r--r--  playbooks/gcp/openshift-cluster/install_gcp.yml  21
-rw-r--r--  playbooks/gcp/openshift-cluster/inventory.yml  10
-rw-r--r--  playbooks/gcp/openshift-cluster/launch.yml  12
-rw-r--r--  playbooks/gcp/openshift-cluster/provision.yml (renamed from playbooks/gcp/provision.yml)  9
-rw-r--r--  playbooks/gcp/openshift-cluster/publish_image.yml  9
l---------  playbooks/gcp/openshift-cluster/roles  1
-rw-r--r--  playbooks/init/evaluate_groups.yml  2
-rw-r--r--  playbooks/init/validate_hostnames.yml  4
-rw-r--r--  playbooks/openshift-etcd/scaleup.yml  47
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml  2
-rw-r--r--  roles/container_runtime/defaults/main.yml  39
-rw-r--r--  roles/flannel/meta/main.yml  1
-rw-r--r--  roles/lib_utils/filter_plugins/oo_filters.py  14
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_aws_filters.py  16
-rw-r--r--  roles/lib_utils/library/docker_creds.py  4
-rw-r--r--  roles/openshift_aws/defaults/main.yml  7
-rw-r--r--  roles/openshift_aws/tasks/elb.yml  2
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml  10
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml  21
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml  5
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml  68
-rw-r--r--  roles/openshift_bootstrap_autoapprover/tasks/main.yml  28
-rw-r--r--  roles/openshift_cloud_provider/tasks/gce.yml  10
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml  28
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml  10
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml  13
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml  45
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml  13
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml  45
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml  35
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml  38
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml  956
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml  1424
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml  28
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml  10
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml  13
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml  45
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml  13
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml  45
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml  35
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml  38
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml  956
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml  1424
-rw-r--r--  roles/openshift_gcp/files/bootstrap-script.sh  42
-rw-r--r--  roles/openshift_gcp/files/openshift-bootstrap-update.service  7
-rw-r--r--  roles/openshift_gcp/files/openshift-bootstrap-update.timer  10
-rw-r--r--  roles/openshift_gcp/files/partition.conf (renamed from roles/openshift_gcp_image_prep/files/partition.conf)  2
-rw-r--r--  roles/openshift_gcp/meta/main.yml  17
-rw-r--r--  roles/openshift_gcp/tasks/add_custom_repositories.yml  20
-rw-r--r--  roles/openshift_gcp/tasks/configure_gcp_base_image.yml (renamed from roles/openshift_gcp_image_prep/tasks/main.yaml)  14
-rw-r--r--  roles/openshift_gcp/tasks/configure_master_bootstrap.yml  36
-rw-r--r--  roles/openshift_gcp/tasks/configure_master_healthcheck.yml  19
-rw-r--r--  roles/openshift_gcp/tasks/dynamic_inventory.yml  5
-rw-r--r--  roles/openshift_gcp/tasks/frequent_log_rotation.yml  18
-rw-r--r--  roles/openshift_gcp/tasks/main.yml (renamed from roles/openshift_gcp/tasks/main.yaml)  4
-rw-r--r--  roles/openshift_gcp/tasks/node_cloud_config.yml  12
-rw-r--r--  roles/openshift_gcp/tasks/publish_image.yml  32
-rw-r--r--  roles/openshift_gcp/tasks/setup_scale_group_facts.yml  44
-rw-r--r--  roles/openshift_gcp/templates/inventory.j2.sh  8
-rw-r--r--  roles/openshift_gcp/templates/master_healthcheck.j2  68
-rw-r--r--  roles/openshift_gcp/templates/openshift-bootstrap-update.j2  7
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh  17
-rw-r--r--  roles/openshift_gcp/templates/yum_repo.j2  20
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml  2
l---------  roles/openshift_hosted/tasks/storage/registry_config.j2  1
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.cfg.j2  3
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml  2
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml  2
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml  219
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-template.yaml  221
-rw-r--r--  roles/openshift_metrics/tasks/oc_apply.yaml  8
-rw-r--r--  roles/openshift_node/defaults/main.yml  12
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml  19
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2  2
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml  20
-rw-r--r--  roles/openshift_node_certificates/vars/main.yml  2
-rw-r--r--  roles/openshift_provisioners/tasks/oc_apply.yaml  12
-rw-r--r--  roles/openshift_web_console/files/console-config.yaml  1
-rw-r--r--  roles/openshift_web_console/tasks/install.yml  2
-rw-r--r--  roles/openshift_web_console/tasks/update_console_config.yml  78
-rw-r--r--  utils/src/ooinstall/cli_installer.py  4
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py  2
111 files changed, 6509 insertions, 1158 deletions
diff --git a/.dockerignore b/.dockerignore
index 0a70c5bfa..2509d48b5 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,7 +2,7 @@
bin
docs
hack
-inventory
+inventory/hosts.*
test
utils
**/*.md
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index b1390480a..22a0d06a0 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -8,12 +8,14 @@ USER root
COPY images/installer/origin-extra-root /
# install ansible and deps
-RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
+RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
&& yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
&& EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
&& yum install -y epel-release \
&& yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
- && rpm -V $INSTALL_PKGS $EPEL_PKGS \
+ && EPEL_TESTING_PKGS="python2-libcloud" \
+ && yum install -y --enablerepo=epel-testing --setopt=tsflags=nodocs $EPEL_TESTING_PKGS \
+ && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \
&& yum clean all
LABEL name="openshift/origin-ansible" \
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 05df6b43a..3b05c1aa6 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
USER root
# Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
&& yum repolist > /dev/null \
&& yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
&& yum-config-manager --enable rhel-7-server-rh-common-rpms \
diff --git a/images/installer/root/usr/local/bin/entrypoint-gcp b/images/installer/root/usr/local/bin/entrypoint-gcp
new file mode 100755
index 000000000..d0ffd9904
--- /dev/null
+++ b/images/installer/root/usr/local/bin/entrypoint-gcp
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# This file sets up the user to run in the GCP environment.
+# It provides dynamic inventory that works well when run in
+# a container environment by setting up a default inventory.
+# It assumes the user has provided a GCP service account token
+# and ssh-privatekey file at "$(pwd)/inventory/dynamic/injected"
+# and automatically links any YAML files found into the group
+# vars directory, which allows the playbook to more easily be
+# run in containerized contexts.
+
+WORK=$(pwd)
+FILES="${WORK}/inventory/dynamic/injected"
+
+# Patch /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+
+if ! whoami &>/dev/null; then
+ echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+# Provide a "files_dir" variable that points to inventory/dynamic/injected
+echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/gcp/group_vars/all/00_default_files_dir.yml"
+# Add any injected variable files into the group vars directory
+find "${FILES}" -name '*.yml' -or -name '*.yaml' -or -name vars | xargs -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/gcp/group_vars/all"
+# Avoid sudo when running locally - nothing in the image requires it.
+mkdir -p "${WORK}/inventory/dynamic/gcp/host_vars/localhost"
+echo "ansible_become: no" > "${WORK}/inventory/dynamic/gcp/host_vars/localhost/00_skip_root.yaml"
+
+if [[ -z "${ANSIBLE_CONFIG-}" ]]; then
+ export ANSIBLE_CONFIG="${WORK}/inventory/dynamic/gcp/ansible.cfg"
+fi
+
+# SSH requires the file to be owned by the current user, but Docker copies
+# files in as root. Put the file into the ssh dir with the right permissions
+if [[ -f "${FILES}/ssh-privatekey" ]]; then
+ keyfile="${HOME}/.ssh/google_compute_engine"
+ mkdir "${HOME}/.ssh"
+ rm -f "${keyfile}"
+ cat "${FILES}/ssh-privatekey" > "${keyfile}"
+ chmod 0600 "${keyfile}"
+ ssh-keygen -y -f "${keyfile}" > "${keyfile}.pub"
+fi
+if [[ -f "${FILES}/gce.json" ]]; then
+ gcloud auth activate-service-account --key-file="${FILES}/gce.json"
+else
+ echo "No service account file found at ${FILES}/gce.json, bypassing login"
+fi
+
+exec "$@"
\ No newline at end of file
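
The passwd patching above is the standard workaround for running this image under an arbitrary, unprivileged UID: ssh (and anything else that calls getpwuid) needs an /etc/passwd entry for the current user. A minimal Python sketch of the same idea, for illustration only (the image itself uses the shell version above; the "Default User" gecos field and nologin shell are just placeholders):

import os
import pwd

def ensure_passwd_entry(passwd_path="/etc/passwd"):
    """Append a passwd entry for the current UID if one is missing."""
    try:
        pwd.getpwuid(os.getuid())   # same check the entrypoint performs with `whoami`
    except KeyError:
        entry = "{user}:x:{uid}:{gid}:Default User:{home}:/sbin/nologin\n".format(
            user=os.environ.get("USER", "default"),
            uid=os.getuid(), gid=os.getgid(),
            home=os.environ.get("HOME", "/"))
        with open(passwd_path, "a") as handle:
            handle.write(entry)

if __name__ == "__main__":
    ensure_passwd_entry()
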
diff --git a/images/installer/root/usr/local/bin/user_setup b/images/installer/root/usr/local/bin/user_setup
index b76e60a4d..dba0af3e4 100755
--- a/images/installer/root/usr/local/bin/user_setup
+++ b/images/installer/root/usr/local/bin/user_setup
@@ -12,6 +12,8 @@ chmod g+rw /etc/passwd
# ensure that the ansible content is accessible
chmod -R g+r ${WORK_DIR}
find ${WORK_DIR} -type d -exec chmod g+x {} +
+# ensure that the dynamic inventory dir can have content created
+find ${WORK_DIR} -type d -exec chmod g+wx {} +
# no need for this script to remain in the image after running
rm $0
diff --git a/inventory/.gitignore b/inventory/.gitignore
index 6ff331c7e..97aa044f6 100644
--- a/inventory/.gitignore
+++ b/inventory/.gitignore
@@ -1 +1,2 @@
hosts
+/dynamic/gcp/group_vars/all/00_default_files_dir.yml
\ No newline at end of file
diff --git a/inventory/dynamic/gcp/README.md b/inventory/dynamic/gcp/README.md
new file mode 100644
index 000000000..217a035ca
--- /dev/null
+++ b/inventory/dynamic/gcp/README.md
@@ -0,0 +1 @@
+This directory provides dynamic inventory for a GCP cluster configured via the GCP provisioning playbook. Set inventory to `inventory/dynamic/gcp/hosts.sh` to calculate the appropriate host set.
\ No newline at end of file
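
For orientation: a dynamic inventory script such as hosts.sh only has to print Ansible's inventory JSON when called with --list, i.e. group names mapping to host lists plus a _meta.hostvars block. A hedged sketch of that shape with invented host names and variables (the real output is produced by hosts.py, added later in this diff):

import json

inventory = {
    "tag_ocp-master": ["example-master-0"],
    "us-central1-a": ["example-master-0"],
    "_meta": {
        "hostvars": {
            "example-master-0": {
                "gce_private_ip": "10.0.0.2",
                "ansible_host": "10.0.0.2",
            },
        },
    },
}
print(json.dumps(inventory, indent=2))
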
diff --git a/inventory/dynamic/gcp/ansible.cfg b/inventory/dynamic/gcp/ansible.cfg
new file mode 100644
index 000000000..f87d51f28
--- /dev/null
+++ b/inventory/dynamic/gcp/ansible.cfg
@@ -0,0 +1,45 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts.
+
+[defaults]
+# Set the log_path
+#log_path = /tmp/ansible.log
+
+private_key_file = $HOME/.ssh/google_compute_engine
+
+# Additional default options for OpenShift Ansible
+forks = 50
+host_key_checking = False
+retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
+nocows = True
+remote_user = cloud-user
+roles_path = ../../../roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+# work around privilege escalation timeouts in ansible:
+timeout = 30
+
+# Uncomment to use the provided example inventory
+inventory = hosts.sh
+
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r
diff --git a/inventory/dynamic/gcp/group_vars/all/00_defaults.yml b/inventory/dynamic/gcp/group_vars/all/00_defaults.yml
new file mode 100644
index 000000000..2f72e905f
--- /dev/null
+++ b/inventory/dynamic/gcp/group_vars/all/00_defaults.yml
@@ -0,0 +1,42 @@
+# GCP uses non-root users by default, so sudo by default
+---
+ansible_become: yes
+
+openshift_deployment_type: origin
+
+# Debugging settings
+debug_level: 2
+openshift_debug_level: "{{ debug_level }}"
+openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
+openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
+
+# External API settings
+console_port: 443
+internal_console_port: 8443
+openshift_master_api_port: "8443"
+openshift_master_console_port: "8443"
+openshift_master_cluster_hostname: "internal-openshift-master.{{ public_hosted_zone }}"
+openshift_master_cluster_public_hostname: "openshift-master.{{ public_hosted_zone }}"
+openshift_master_default_subdomain: "{{ wildcard_zone }}"
+
+# Cloud specific settings
+openshift_cloudprovider_kind: gce
+openshift_hosted_registry_storage_provider: gcs
+
+openshift_master_access_token_max_seconds: 2419200
+openshift_master_identity_providers:
+
+# Networking settings
+openshift_node_port_range: 30000-32000
+openshift_node_open_ports: [{"service":"Router stats port", "port":"1936/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/udp"}]
+openshift_node_sdn_mtu: 1410
+osm_cluster_network_cidr: 172.16.0.0/16
+osm_host_subnet_length: 9
+openshift_portal_net: 172.30.0.0/16
+
+# Default cluster configuration
+openshift_master_cluster_method: native
+openshift_schedulable: true
+# TODO: change to upstream conventions
+openshift_hosted_infra_selector: "role=infra"
+osm_default_node_selector: "role=app"
diff --git a/inventory/dynamic/gcp/hosts.py b/inventory/dynamic/gcp/hosts.py
new file mode 100755
index 000000000..cd1262622
--- /dev/null
+++ b/inventory/dynamic/gcp/hosts.py
@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a derivative of gce.py that adds support for filtering
+# the returned inventory to only include instances that have tags
+# as specified by GCE_TAGGED_INSTANCES. This prevents dynamic
+# inventory for multiple clusters within the same project from
+# accidentally stomping each other.
+
+# pylint: skip-file
+
+'''
+GCE external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests
+Google Compute Engine via the libcloud library. Full install/configuration
+instructions for the gce* modules can be found in the comments of
+ansible/test/gce_tests.py.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the libcloud Node object:
+ - gce_uuid
+ - gce_id
+ - gce_image
+ - gce_machine_type
+ - gce_private_ip
+ - gce_public_ip
+ - gce_name
+ - gce_description
+ - gce_status
+ - gce_zone
+ - gce_tags
+ - gce_metadata
+ - gce_network
+
+When run in --list mode, instances are grouped by the following categories:
+ - zone:
+ zone group name examples are us-central1-b, europe-west1-a, etc.
+ - instance tags:
+ An entry is created for each tag. For example, if you have two instances
+ with a common tag called 'foo', they will both be grouped together under
+ the 'tag_foo' name.
+ - network name:
+ the name of the network is appended to 'network_' (e.g. the 'default'
+ network will result in a group named 'network_default')
+ - machine type
+ types follow a pattern like n1-standard-4, g1-small, etc.
+ - running status:
+ group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
+ - image:
+ when using an ephemeral/scratch disk, this will be set to the image name
+ used when creating the instance (e.g. debian-7-wheezy-v20130816). when
+ your instance was created with a root persistent disk it will be set to
+ 'persistent_disk' since there is no current way to determine the image.
+
+Examples:
+ Execute uname on all instances in the us-central1-a zone
+ $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
+
+ Use the GCE inventory script to print out instance specific information
+ $ contrib/inventory/gce.py --host my_instance
+
+Author: Eric Johnson <erjohnso@google.com>
+Contributors: Matt Hite <mhite@hotmail.com>
+Version: 0.0.2
+'''
+
+__requires__ = ['pycrypto>=2.6']
+try:
+ import pkg_resources
+except ImportError:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. We don't
+ # fail here as there is code that better expresses the errors where the
+ # library is used.
+ pass
+
+USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
+USER_AGENT_VERSION="v2"
+
+import sys
+import os
+import time
+import argparse
+import ConfigParser
+
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+except:
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class GceInventory(object):
+ def __init__(self):
+ # Read settings and parse CLI arguments
+ self.parse_cli_args()
+ self.config = self.get_config()
+ self.driver = self.get_gce_driver()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Just display data for specific host
+ if self.args.host:
+ print(self.json_format_dict(self.node_to_dict(
+ self.get_instance(self.args.host)),
+ pretty=self.args.pretty))
+ sys.exit(0)
+
+ zones = self.parse_env_zones()
+
+ # Otherwise, assume user wants all instances grouped
+ print(self.json_format_dict(self.group_instances(zones),
+ pretty=self.args.pretty))
+ sys.exit(0)
+
+ def get_config(self):
+ """
+ Populates a SafeConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
+ """
+ gce_ini_default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "gce.ini")
+ gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+
+ # Create a ConfigParser.
+ # This provides empty defaults to each key, so that environment
+ # variable configuration (as opposed to INI configuration) is able
+ # to work.
+ config = ConfigParser.SafeConfigParser(defaults={
+ 'gce_service_account_email_address': '',
+ 'gce_service_account_pem_file_path': '',
+ 'gce_project_id': '',
+ 'libcloud_secrets': '',
+ 'inventory_ip_type': '',
+ })
+ if 'gce' not in config.sections():
+ config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+
+ config.read(gce_ini_path)
+
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ return config
+
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_driver(self):
+ """Determine the GCE authorization settings and return a
+ libcloud driver.
+ """
+ # Attempt to get GCE params from a configuration file, if one
+ # exists.
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
+ secrets_found = False
+ try:
+ import secrets
+ args = list(getattr(secrets, 'GCE_PARAMS', []))
+ kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except:
+ pass
+
+ if not secrets_found and secrets_path:
+ if not secrets_path.endswith('secrets.py'):
+ err = "Must specify libcloud secrets file as "
+ err += "/absolute/path/to/secrets.py"
+ sys.exit(err)
+ sys.path.append(os.path.dirname(secrets_path))
+ try:
+ import secrets
+ args = list(getattr(secrets, 'GCE_PARAMS', []))
+ kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except:
+ pass
+ if not secrets_found:
+ args = [
+ self.config.get('gce','gce_service_account_email_address'),
+ self.config.get('gce','gce_service_account_pem_file_path')
+ ]
+ kwargs = {'project': self.config.get('gce', 'gce_project_id')}
+
+ # If the appropriate environment variables are set, they override
+ # other configuration; process those into our args and kwargs.
+ args[0] = os.environ.get('GCE_EMAIL', args[0])
+ args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
+ kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
+
+ # Retrieve and return the GCE driver.
+ gce = get_driver(Provider.GCE)(*args, **kwargs)
+ gce.connection.user_agent_append(
+ '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
+ )
+ return gce
+
+ def parse_env_zones(self):
+ '''returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
+ If provided, this will be used to filter the results of the grouped_instances call'''
+ import csv
+ reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on GCE')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all information about an instance')
+ parser.add_argument('--tagged', action='store',
+ help='Only include instances with this tag')
+ parser.add_argument('--pretty', action='store_true', default=False,
+ help='Pretty format (default: False)')
+ self.args = parser.parse_args()
+
+ tag_env = os.environ.get('GCE_TAGGED_INSTANCES')
+ if not self.args.tagged and tag_env:
+ self.args.tagged = tag_env
+
+ def node_to_dict(self, inst):
+ md = {}
+
+ if inst is None:
+ return {}
+
+ if inst.extra['metadata'].has_key('items'):
+ for entry in inst.extra['metadata']['items']:
+ md[entry['key']] = entry['value']
+
+ net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ # default to external IP unless user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
+ return {
+ 'gce_uuid': inst.uuid,
+ 'gce_id': inst.id,
+ 'gce_image': inst.image,
+ 'gce_machine_type': inst.size,
+ 'gce_private_ip': inst.private_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
+ 'gce_name': inst.name,
+ 'gce_description': inst.extra['description'],
+ 'gce_status': inst.extra['status'],
+ 'gce_zone': inst.extra['zone'].name,
+ 'gce_tags': inst.extra['tags'],
+ 'gce_metadata': md,
+ 'gce_network': net,
+ # Hosts don't have a public name, so we add an IP
+ 'ansible_host': ssh_host
+ }
+
+ def get_instance(self, instance_name):
+ '''Gets details about a specific instance '''
+ try:
+ return self.driver.ex_get_node(instance_name)
+ except Exception as e:
+ return None
+
+ def group_instances(self, zones=None):
+ '''Group all instances'''
+ groups = {}
+ meta = {}
+ meta["hostvars"] = {}
+
+ # list_nodes will fail if a disk is in the process of being deleted
+ # from a node, which is not uncommon if other playbooks are managing
+ # the same project. Retry if we receive a not found error.
+ nodes = []
+ tries = 0
+ while True:
+ try:
+ nodes = self.driver.list_nodes()
+ break
+ except ResourceNotFoundError:
+ tries = tries + 1
+ if tries > 15:
+ raise
+ time.sleep(1)
+ continue
+
+ for node in nodes:
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+ if self.instance_states and not node.extra['status'] in self.instance_states:
+ continue
+
+ name = node.name
+
+ if self.args.tagged and self.args.tagged not in node.extra['tags']:
+ continue
+
+ meta["hostvars"][name] = self.node_to_dict(node)
+
+ zone = node.extra['zone'].name
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if groups.has_key(zone): groups[zone].append(name)
+ else: groups[zone] = [name]
+
+ tags = node.extra['tags']
+ for t in tags:
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
+ if groups.has_key(tag): groups[tag].append(name)
+ else: groups[tag] = [name]
+
+ net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ net = 'network_%s' % net
+ if groups.has_key(net): groups[net].append(name)
+ else: groups[net] = [name]
+
+ machine_type = node.size
+ if groups.has_key(machine_type): groups[machine_type].append(name)
+ else: groups[machine_type] = [name]
+
+ image = node.image and node.image or 'persistent_disk'
+ if groups.has_key(image): groups[image].append(name)
+ else: groups[image] = [name]
+
+ status = node.extra['status']
+ stat = 'status_%s' % status.lower()
+ if groups.has_key(stat): groups[stat].append(name)
+ else: groups[stat] = [name]
+
+ groups["_meta"] = meta
+
+ return groups
+
+ def json_format_dict(self, data, pretty=False):
+ ''' Converts a dict to a JSON object and dumps it as a formatted
+ string '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+# Run the script
+GceInventory()
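
The tag filtering added in this derivative (GCE_TAGGED_INSTANCES / --tagged) is what keeps two clusters sharing one GCP project out of each other's inventory. A standalone sketch of that grouping behaviour with made-up node data, mirroring the logic in group_instances() above:

def grouped(nodes, tagged=None):
    """Group nodes by tag the way group_instances() does, honoring the tag filter."""
    groups = {}
    for node in nodes:
        if tagged and tagged not in node["tags"]:
            continue                  # skip instances that belong to another cluster
        for tag in node["tags"]:
            # 'group-' prefixed tags become bare group names, everything else gets 'tag_'
            key = tag[6:] if tag.startswith("group-") else "tag_%s" % tag
            groups.setdefault(key, []).append(node["name"])
    return groups

nodes = [
    {"name": "alpha-master-0", "tags": ["alpha", "group-masters"]},
    {"name": "beta-node-7", "tags": ["beta", "group-nodes"]},
]
print(grouped(nodes, tagged="alpha"))
# -> {'tag_alpha': ['alpha-master-0'], 'masters': ['alpha-master-0']}
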
diff --git a/inventory/dynamic/gcp/hosts.sh b/inventory/dynamic/gcp/hosts.sh
new file mode 100755
index 000000000..0c88e3a6b
--- /dev/null
+++ b/inventory/dynamic/gcp/hosts.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Use a playbook to calculate the inventory dynamically from
+# the provided cluster variables.
+src="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+if ! out="$( ansible-playbook --inventory-file "${src}/none" ${src}/../../../playbooks/gcp/openshift-cluster/inventory.yml 2>&1 )"; then
+ echo "error: Inventory configuration failed" 1>&2
+ echo "$out" 1>&2
+ echo "{}"
+ exit 1
+fi
+source "/tmp/inventory.sh"
+exec ${src}/hosts.py
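
hosts.sh defers all connection details to the inventory.yml playbook, which is expected to leave environment exports in /tmp/inventory.sh; hosts.py then picks the corresponding GCE_* variables up from the environment (see get_gce_driver and parse_cli_args above). A hedged sketch of that hand-off, with invented values standing in for whatever the playbook actually writes:

import os
import subprocess

# Invented values; in practice these come from sourcing /tmp/inventory.sh.
os.environ.update({
    "GCE_PROJECT": "my-gcp-project",
    "GCE_EMAIL": "installer@my-gcp-project.iam.gserviceaccount.com",
    "GCE_PEM_FILE_PATH": "/tmp/gce.json",
    "GCE_TAGGED_INSTANCES": "my-cluster",
})
# Equivalent of the final `exec ${src}/hosts.py` once the environment is prepared.
subprocess.call(["./hosts.py", "--list", "--pretty"])
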
diff --git a/inventory/dynamic/gcp/none b/inventory/dynamic/gcp/none
new file mode 100644
index 000000000..9e26dfeeb
--- /dev/null
+++ b/inventory/dynamic/gcp/none
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/inventory/dynamic/injected/README.md b/inventory/dynamic/injected/README.md
new file mode 100644
index 000000000..5e2e4c549
--- /dev/null
+++ b/inventory/dynamic/injected/README.md
@@ -0,0 +1,3 @@
+This directory may be used to inject inventory into openshift-ansible
+when used in a container. Other scripts like the cloud provider entrypoints
+will automatically use the content of this directory as inventory.
diff --git a/inventory/hosts.example b/inventory/hosts.example
index da60b63e6..f9f331880 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -845,12 +845,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# See: https://github.com/nickhammond/ansible-logrotate
#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
-# openshift-ansible will wait indefinitely for your input when it detects that the
+# The OpenShift-Ansible installer will fail when it detects that the
# value of openshift_hostname resolves to an IP address not bound to any local
# interfaces. This mis-configuration is problematic for any pod leveraging host
# networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
+# Setting this variable to false will override that check.
+#openshift_hostname_check=true
# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
# in versions >= 3.6
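
The hostname check described in the hunk above boils down to: resolve openshift_hostname and fail when none of the resulting addresses is bound to a local interface, because host-network pods and liveness/readiness probes would then be aimed at the wrong place. A rough, Linux-only Python sketch of such a check (an illustration of the idea, not the installer's actual implementation):

import socket
import subprocess

def local_ipv4_addresses():
    """IPv4 addresses bound to local interfaces, via `ip -o -4 addr show` (Linux only)."""
    out = subprocess.check_output(["ip", "-o", "-4", "addr", "show"]).decode()
    return {line.split()[3].split("/")[0] for line in out.splitlines() if line.strip()}

def check_hostname(hostname):
    resolved = {info[4][0] for info in socket.getaddrinfo(hostname, None, socket.AF_INET)}
    if not resolved & local_ipv4_addresses():
        raise SystemExit("%s resolves to %s, which is not bound to any local interface"
                         % (hostname, ", ".join(sorted(resolved))))

if __name__ == "__main__":
    check_hostname(socket.gethostname())
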
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index c09e14c66..719e54eb9 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -48,7 +48,8 @@ popd
%install
# Base openshift-ansible install
mkdir -p %{buildroot}%{_datadir}/%{name}
-mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible/%{name}/inventory
+cp -rp inventory/dynamic %{buildroot}%{_datadir}/ansible/%{name}/inventory
# openshift-ansible-bin install
mkdir -p %{buildroot}%{_bindir}
@@ -62,7 +63,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
# openshift-ansible-docs install
# Install example inventory into docs/examples
mkdir -p docs/example-inventories
-cp inventory/* docs/example-inventories/
+cp inventory/hosts.* inventory/README.md docs/example-inventories/
# openshift-ansible-files install
cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
@@ -101,6 +102,7 @@ popd
%license LICENSE
%dir %{_datadir}/ansible/%{name}
%{_datadir}/ansible/%{name}/files
+%{_datadir}/ansible/%{name}/inventory/dynamic
%ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
# ----------------------------------------------------------------------------------
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
deleted file mode 100644
index 9d9ed29de..000000000
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- import_playbook: ../../openshift-hosted/private/config.yml
-
-- import_playbook: ../../openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: ../../openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: ../../openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: ../../openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(false) | bool
-
-- import_playbook: ../../openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index a3fc82f9a..938e83f5e 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -18,29 +18,8 @@
- name: run the init
import_playbook: ../../init/main.yml
-- name: perform the installer openshift-checks
- import_playbook: ../../openshift-checks/private/install.yml
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
-- name: etcd install
- import_playbook: ../../openshift-etcd/private/config.yml
-
-- name: include nfs
- import_playbook: ../../openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- name: include loadbalancer
- import_playbook: ../../openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- name: include openshift-master config
- import_playbook: ../../openshift-master/private/config.yml
-
-- name: include master additional config
- import_playbook: ../../openshift-master/private/additional_config.yml
-
-- name: include master additional config
+- name: ensure the masters are configured as nodes
import_playbook: ../../openshift-node/private/config.yml
-
-- name: include openshift-glusterfs
- import_playbook: ../../openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index f98f5be9a..bd154fa83 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -15,5 +15,5 @@
- name: Include the accept.yml playbook to accept nodes into the cluster
import_playbook: accept.yml
-- name: Include the hosted.yml playbook to finish the hosted configuration
- import_playbook: hosted.yml
+- name: Include the components playbook to finish the hosted configuration
+ import_playbook: ../../common/private/components.yml
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index f6b1a6b5d..a1a8a5b08 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -41,11 +41,27 @@ openshift_pkg_version: # -3.7.0
# a vpc, set this to false.
#openshift_aws_create_vpc: true
+# when openshift_aws_create_vpc is true (the default), the VPC defined in
+# openshift_aws_vpc will be created
+#openshift_aws_vpc:
+# name: "{{ openshift_aws_vpc_name }}"
+# cidr: 172.31.0.0/16
+# subnets:
+# us-east-1:
+# - cidr: 172.31.48.0/20
+# az: "us-east-1c"
+# default_az: true
+# - cidr: 172.31.32.0/20
+# az: "us-east-1e"
+# - cidr: 172.31.16.0/20
+# az: "us-east-1a"
+
# Name of the vpc. Needs to be set if using a pre-existing vpc.
#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
-# vpc + subnet.
+# vpc + subnet. Otherwise will use the subnet with 'default_az' set (see above
+# example VPC structure)
#openshift_aws_subnet_az:
# -------------- #
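
As a hedged illustration of how the default subnet described above could be selected from the openshift_aws_vpc structure when openshift_aws_subnet_az is left unset (values copied from the commented example; the selection helper itself is invented):

vpc = {
    "name": "example-vpc",
    "cidr": "172.31.0.0/16",
    "subnets": {
        "us-east-1": [
            {"cidr": "172.31.48.0/20", "az": "us-east-1c", "default_az": True},
            {"cidr": "172.31.32.0/20", "az": "us-east-1e"},
            {"cidr": "172.31.16.0/20", "az": "us-east-1a"},
        ],
    },
}

def default_az(vpc, region="us-east-1"):
    """Return the AZ of the subnet flagged with default_az, if any."""
    for subnet in vpc["subnets"][region]:
        if subnet.get("default_az"):
            return subnet["az"]
    return None

print(default_az(vpc))   # -> us-east-1c
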
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
new file mode 100644
index 000000000..089645d07
--- /dev/null
+++ b/playbooks/common/private/components.yml
@@ -0,0 +1,38 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally considered any part of OpenShift that runs on
+# top of the cluster and may be considered optional. Over time, much of OpenShift
+# above the Kubernetes apiserver and masters may be considered components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can
+# perform root level actions against the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available to allow components to schedule - this must
+# include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+ when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml
new file mode 100644
index 000000000..0a5f1142b
--- /dev/null
+++ b/playbooks/common/private/control_plane.yml
@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# It is not required for any nodes to be configured, or passed to be configured,
+# when this playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+# and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from the outside of the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+# access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
index a2361d50c..8fb7b63e8 100644
--- a/playbooks/container-runtime/private/build_container_groups.yml
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -1,6 +1,8 @@
---
+# l_build_container_groups_hosts is passed in via prerequisites.yml during
+# etcd scaleup plays.
- name: create oo_hosts_containerized_managed_true host group
- hosts: oo_all_hosts:!oo_nodes_to_config
+ hosts: "{{ l_build_container_groups_hosts | default('oo_all_hosts:!oo_nodes_to_config') }}"
tasks:
- group_by:
key: oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true','false') }}
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 817a8bf30..5396df20a 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,9 +1,11 @@
---
# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
- import_playbook: build_container_groups.yml
-- hosts: "{{ l_scale_up_hosts | default(l_default_container_runtime_hosts) }}"
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_runtime_hosts) }}"
vars:
l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
roles:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index 65630be62..586149b1d 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,9 +1,11 @@
---
# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
- import_playbook: build_container_groups.yml
-- hosts: "{{ l_scale_up_hosts | default(l_default_container_storage_hosts) }}"
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_storage_hosts) }}"
vars:
l_default_container_storage_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 5efdc486a..361553ee4 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -1,44 +1,11 @@
---
- import_playbook: init/main.yml
-- import_playbook: openshift-checks/private/install.yml
-
-- import_playbook: openshift-etcd/private/config.yml
-
-- import_playbook: openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- import_playbook: openshift-master/private/config.yml
-
-- import_playbook: openshift-master/private/additional_config.yml
+- import_playbook: common/private/control_plane.yml
- import_playbook: openshift-node/private/config.yml
-- import_playbook: openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-hosted/private/config.yml
-
-- import_playbook: openshift-web-console/private/config.yml
- when: openshift_web_console_install | default(true) | bool
-
-- import_playbook: openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(true) | bool
-
-- import_playbook: openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
+- import_playbook: common/private/components.yml
- name: Print deprecated variable warning message if necessary
hosts: oo_first_master
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
new file mode 100644
index 000000000..75d0ddf9d
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -0,0 +1,162 @@
+---
+# This playbook ensures that a base image is up to date with all of the required settings
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_root_image
+ fail:
+ msg: "A root OS image name or family is required for base image building. Please ensure `openshift_gcp_root_image` is defined."
+ when: openshift_gcp_root_image is undefined
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_root_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: build_instance_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- name: Prepare instance content sources
+ pre_tasks:
+ - set_fact:
+ allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}"
+ - set_fact:
+ using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}"
+ hosts: build_instance_ips
+ roles:
+ - role: rhel_subscribe
+ when: using_rhel_subscriptions
+ - role: openshift_repos
+ vars:
+ openshift_additional_repos: []
+ post_tasks:
+ - name: Add custom repositories
+ include_role:
+ name: openshift_gcp
+ tasks_from: add_custom_repositories.yml
+ - name: Add the Google Cloud repo
+ yum_repository:
+ name: google-cloud
+ description: Google Cloud Compute
+ baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
+ gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+ gpgcheck: yes
+ repo_gpgcheck: yes
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Add the jdetiber-qemu-user-static copr repo
+ yum_repository:
+ name: jdetiber-qemu-user-static
+ description: QEMU user static COPR
+ baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/
+ gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg
+ gpgcheck: yes
+ repo_gpgcheck: no
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Install qemu-user-static
+ package:
+ name: qemu-user-static
+ state: present
+ - name: Start and enable systemd-binfmt service
+ systemd:
+ name: systemd-binfmt
+ state: started
+ enabled: yes
+
+- name: Build image
+ hosts: build_instance_ips
+ pre_tasks:
+ - name: Set up core host GCP configuration
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_gcp_base_image.yml
+ roles:
+ - role: os_update_latest
+ post_tasks:
+ - name: Disable all repos on RHEL
+ command: subscription-manager repos --disable="*"
+ when: using_rhel_subscriptions
+ - name: Enable repos for packages on RHEL
+ command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms"
+ when: using_rhel_subscriptions
+ - name: Install common image prerequisites
+ package: name={{ item }} state=latest
+ with_items:
+ # required by Ansible
+ - PyYAML
+ - docker
+ - google-compute-engine
+ - google-compute-engine-init
+ - google-config
+ - wget
+ - git
+ - net-tools
+ - bind-utils
+ - iptables-services
+ - bridge-utils
+ - bash-completion
+ - name: Clean yum metadata
+ command: yum clean all
+ args:
+ warn: no
+ when: ansible_os_family == "RedHat"
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+ command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/build_image.yml b/playbooks/gcp/openshift-cluster/build_image.yml
new file mode 100644
index 000000000..787de8ebc
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_image.yml
@@ -0,0 +1,106 @@
+---
+- name: Verify prerequisites for image build
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_base_image
+ fail:
+ msg: "A base image name or family is required for image building. Please ensure `openshift_gcp_base_image` is defined."
+ when: openshift_gcp_base_image is undefined
+
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+ openshift_master_unsupported_embedded_etcd: True
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_base_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - name: add host to nodes
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: nodes
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- hosts: nodes
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- import_playbook: ../../openshift-node/private/image_prep.yml
+
+# Add additional GCP specific behavior
+- hosts: nodes
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ tasks_from: node_cloud_config.yml
+ - include_role:
+ name: openshift_gcp
+ tasks_from: frequent_log_rotation.yml
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+ command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_image_name | default(openshift_gcp_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/deprovision.yml b/playbooks/gcp/openshift-cluster/deprovision.yml
new file mode 100644
index 000000000..589fddd2f
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/deprovision.yml
@@ -0,0 +1,10 @@
+# This playbook terminates a running cluster
+---
+- name: Terminate running cluster and remove all supporting resources in GCE
+ hosts: localhost
+ connection: local
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ vars:
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/install.yml b/playbooks/gcp/openshift-cluster/install.yml
new file mode 100644
index 000000000..fb35b4348
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install.yml
@@ -0,0 +1,33 @@
+# This playbook installs OpenShift onto an already-provisioned cluster
+---
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: place all scale groups into Ansible groups
+ include_role:
+ name: openshift_gcp
+ tasks_from: setup_scale_group_facts.yml
+
+- name: run the init
+ import_playbook: ../../init/main.yml
+
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
+
+- name: ensure the masters are configured as nodes
+ import_playbook: ../../openshift-node/private/config.yml
+
+- name: run the GCP specific post steps
+ import_playbook: install_gcp.yml
+
+- name: install components
+ import_playbook: ../../common/private/components.yml
+
+- hosts: primary_master
+ gather_facts: no
+ tasks:
+ - name: Retrieve cluster configuration
+ fetch:
+ src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+ dest: "/tmp/"
+ flat: yes
diff --git a/playbooks/gcp/openshift-cluster/install_gcp.yml b/playbooks/gcp/openshift-cluster/install_gcp.yml
new file mode 100644
index 000000000..09db78971
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install_gcp.yml
@@ -0,0 +1,21 @@
+---
+- hosts: masters
+ gather_facts: no
+ tasks:
+ - name: create master health check service
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_healthcheck.yml
+ - name: configure node bootstrapping
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_bootstrap.yml
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - name: configure node bootstrap autoapprover
+ include_role:
+ name: openshift_bootstrap_autoapprover
+ tasks_from: main
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - openshift_master_bootstrap_auto_approve | default(False) | bool
diff --git a/playbooks/gcp/openshift-cluster/inventory.yml b/playbooks/gcp/openshift-cluster/inventory.yml
new file mode 100644
index 000000000..96de6d6db
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/inventory.yml
@@ -0,0 +1,10 @@
+---
+- name: Set up the connection variables for retrieving inventory from GCE
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: materialize the inventory
+ include_role:
+ name: openshift_gcp
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/launch.yml b/playbooks/gcp/openshift-cluster/launch.yml
new file mode 100644
index 000000000..02f00408a
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/launch.yml
@@ -0,0 +1,12 @@
+# This playbook launches a new cluster or converges it if already launched
+---
+- import_playbook: build_image.yml
+ when: openshift_gcp_build_image | default(False) | bool
+
+- import_playbook: provision.yml
+
+- hosts: localhost
+ tasks:
+ - meta: refresh_inventory
+
+- import_playbook: install.yml
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
index b6edf9961..293a195c9 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/openshift-cluster/provision.yml
@@ -3,11 +3,10 @@
hosts: localhost
connection: local
gather_facts: no
+ roles:
+ - openshift_gcp
tasks:
-
- - name: provision a GCP cluster in the specified project
+ - name: recalculate the dynamic inventory
import_role:
name: openshift_gcp
-
-- name: run the cluster deploy
- import_playbook: ../deploy_cluster.yml
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/publish_image.yml b/playbooks/gcp/openshift-cluster/publish_image.yml
new file mode 100644
index 000000000..76fd49e9c
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/publish_image.yml
@@ -0,0 +1,9 @@
+---
+- name: Publish the most recent image
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - import_role:
+ name: openshift_gcp
+ tasks_from: publish_image.yml
diff --git a/playbooks/gcp/openshift-cluster/roles b/playbooks/gcp/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index c4cd226c9..924ae481a 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -47,7 +47,7 @@
msg: >
Running etcd as an embedded service is no longer supported.
when:
- - g_etcd_hosts | default([]) | length not in [3,1]
+ - g_etcd_hosts | default([]) | length not in [5,3,1]
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index 86e0b2416..b49f7dd08 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -25,7 +25,7 @@
when:
- lookupip.stdout != '127.0.0.1'
- lookupip.stdout not in ansible_all_ipv4_addresses
- - openshift_hostname_check | default(true)
+ - openshift_hostname_check | default(true) | bool
- name: Validate openshift_ip exists on node when defined
fail:
@@ -40,4 +40,4 @@
when:
- openshift_ip is defined
- openshift_ip not in ansible_all_ipv4_addresses
- - openshift_ip_check | default(true)
+ - openshift_ip_check | default(true) | bool
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 7e9ab6834..656454fe3 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -1,4 +1,51 @@
---
+- import_playbook: ../init/evaluate_groups.yml
+
+- name: Ensure there are new_etcd hosts
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ Detected no hosts in the new_etcd group. Please add the etcd hosts
+ you want to add to the new_etcd host group.
+ when:
+ - g_new_etcd_hosts | default([]) | length == 0
+
+ - fail:
+ msg: >
+ Detected a new_etcd host that is also a member of new_masters or
+ new_nodes. Please run playbooks/openshift-master/scaleup.yml or
+ playbooks/openshift-node/scaleup.yml before running this playbook.
+ when: >
+ inventory_hostname in (groups['new_masters'] | default([]))
+ or inventory_hostname in (groups['new_nodes'] | default([]))
+
+# We only need to run this if etcd is being installed on a standalone host;
+# if etcd is part of a master or node group, there is no need to re-run
+# prerequisites.
+- import_playbook: ../prerequisites.yml
+ vars:
+ # We need to ensure container_runtime is only processed for containerized
+ # etcd hosts by setting l_build_container_groups_hosts and l_etcd_scale_up_hosts
+ l_build_container_groups_hosts: "oo_new_etcd_to_config"
+ l_etcd_scale_up_hosts: "oo_hosts_containerized_managed_true"
+ l_scale_up_hosts: "oo_new_etcd_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
+ when:
+ - inventory_hostname not in groups['oo_masters']
+ - inventory_hostname not in groups['oo_nodes_to_config']
+
+# If this etcd host is part of a master or node, we don't need to run
+# prerequisites; we can just initialize facts as normal.
- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ when:
+ - inventory_hostname in groups['oo_masters']
+ - inventory_hostname in groups['oo_nodes_to_config']
- import_playbook: private/scaleup.yml
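The two comments above encode a small gating rule: run prerequisites only for etcd hosts that are not already masters or nodes, since those hosts received prerequisites during their own scale-up. A hypothetical Python restatement of that decision, with made-up host and group names, is sketched below.

    def should_run_prerequisites(host, masters, nodes):
        # Standalone etcd hosts still need prerequisites; hosts that are already
        # masters or nodes had prerequisites applied during their own scale-up.
        return host not in masters and host not in nodes

    masters = {"master-0"}
    nodes = {"master-0", "node-0"}
    print(should_run_prerequisites("etcd-3", masters, nodes))    # True
    print(should_run_prerequisites("master-0", masters, nodes))  # False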
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index a8663f946..1287b25f3 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -43,7 +43,7 @@ openshift_hosted_registry_wait: True
# NOTE(shadower): the hostname check seems to always fail because the
# host's floating IP address doesn't match the address received from
# inside the host.
-openshift_override_hostname_check: true
+openshift_hostname_check: false
# For POCs or demo environments that are using smaller instances than
# the official recommended values for RAM and DISK, uncomment the line below.
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index d0e37e2f4..8203d15f5 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -101,45 +101,34 @@ l_crt_crio_image_tag_dict:
openshift-enterprise: "{{ l_openshift_image_tag }}"
origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}"
-l_crt_crio_image_prepend_dict:
- openshift-enterprise: "registry.access.redhat.com/openshift3"
- origin: "docker.io/gscrivano"
-
l_crt_crio_image_dict:
- Fedora:
- crio_image_name: "cri-o-fedora"
- crio_image_tag: "latest"
- CentOS:
- crio_image_name: "cri-o-centos"
- crio_image_tag: "latest"
- RedHat:
- crio_image_name: "cri-o"
- crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}"
-
-l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}"
-l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}"
-l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution] }}"
-
-l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+ Fedora: "registry.fedoraproject.org/latest/cri-o"
+ CentOS: "registry.centos.org/projectatomic/cri-o"
+ RedHat: "registry.access.redhat.com/openshift3/cri-o"
+
+l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution] }}"
+l_crio_image_tag: "{{ l_crt_crio_image_tag_dict[openshift_deployment_type] }}"
+
+l_crio_image_default: "{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}"
# ----------------------- #
# systemcontainers_docker #
# ----------------------- #
-l_crt_docker_image_prepend_dict:
- Fedora: "registry.fedoraproject.org/latest"
- Centos: "docker.io/gscrivano"
- RedHat: "registry.access.redhat.com/openshift3"
+l_crt_docker_image_dict:
+ Fedora: "registry.fedoraproject.org/latest/docker"
+ Centos: "registry.centos.org/projectatomic/docker"
+ RedHat: "registry.access.redhat.com/openshift3/container-engine"
openshift_docker_image_tag_default: "latest"
l_crt_docker_image_tag_dict:
openshift-enterprise: "{{ l_openshift_image_tag }}"
origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}"
-l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}"
+l_docker_image_prepend: "{{ l_crt_docker_image_dict[ansible_distribution] }}"
l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}"
-l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
+l_docker_image_default: "{{ l_docker_image_prepend }}:{{ l_docker_image_tag }}"
l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
index 7634b8192..38d2f748b 100644
--- a/roles/flannel/meta/main.yml
+++ b/roles/flannel/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
index 9f73510c4..ef996fefe 100644
--- a/roles/lib_utils/filter_plugins/oo_filters.py
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -4,6 +4,7 @@
"""
Custom filters for use in openshift-ansible
"""
+import json
import os
import pdb
import random
@@ -586,6 +587,18 @@ that result to this filter plugin.
return secret_name
+def lib_utils_oo_l_of_d_to_csv(input_list):
+ """Map a list of dictionaries, input_list, into a csv string
+ of json values.
+
+ Example input:
+ [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}]
+ Example output:
+ u'{"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}'
+ """
+ return ','.join(json.dumps(x) for x in input_list)
+
+
def map_from_pairs(source, delim="="):
''' Returns a dict given the source and delim delimited '''
if source == '':
@@ -623,5 +636,6 @@ class FilterModule(object):
"lib_utils_oo_contains_rule": lib_utils_oo_contains_rule,
"lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
"lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
+ "lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv,
"map_from_pairs": map_from_pairs
}
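For reference, a standalone sketch of what the new lib_utils_oo_l_of_d_to_csv filter returns for the docstring's example input; this is plain Python outside of Ansible, reusing the same function body.

    import json

    def lib_utils_oo_l_of_d_to_csv(input_list):
        # Same body as the filter added above.
        return ','.join(json.dumps(x) for x in input_list)

    example = [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}]
    print(lib_utils_oo_l_of_d_to_csv(example))
    # {"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}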
diff --git a/roles/lib_utils/filter_plugins/openshift_aws_filters.py b/roles/lib_utils/filter_plugins/openshift_aws_filters.py
index dfcb11da3..f16048056 100644
--- a/roles/lib_utils/filter_plugins/openshift_aws_filters.py
+++ b/roles/lib_utils/filter_plugins/openshift_aws_filters.py
@@ -67,8 +67,24 @@ class FilterModule(object):
return tags
+ @staticmethod
+ def get_default_az(subnets):
+ ''' From a list of subnets/AZs in a specific region (from the VPC
+ structure), return the AZ that has the key/value
+ 'default_az=True.' '''
+
+ for subnet in subnets:
+ if subnet.get('default_az'):
+ return subnet['az']
+
+ # If none was marked with default_az=True, just return the first one.
+ # (This does mean we could possibly return an item that has
+ # default_az=False set.)
+ return subnets[0]['az']
+
def filters(self):
''' returns a mapping of filters to methods '''
return {'build_instance_tags': self.build_instance_tags,
+ 'get_default_az': self.get_default_az,
'scale_groups_match_capacity': self.scale_groups_match_capacity,
'scale_groups_serial': self.scale_groups_serial}
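A small standalone sketch of how get_default_az picks an availability zone, using a subnet list shaped like the openshift_aws_vpc default added further below; the CIDR and AZ values here are illustrative.

    def get_default_az(subnets):
        # Return the AZ flagged with default_az=True, else fall back to the first entry.
        for subnet in subnets:
            if subnet.get('default_az'):
                return subnet['az']
        return subnets[0]['az']

    subnets = [
        {'cidr': '172.31.48.0/20', 'az': 'us-east-1c', 'default_az': True},
        {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'},
    ]
    print(get_default_az(subnets))  # us-east-1c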
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
index d4674845e..b94c0b779 100644
--- a/roles/lib_utils/library/docker_creds.py
+++ b/roles/lib_utils/library/docker_creds.py
@@ -135,7 +135,7 @@ def update_config(docker_config, registry, username, password):
docker_config['auths'][registry] = {}
# base64 encode our username:password string
- encoded_data = base64.b64encode('{}:{}'.format(username, password))
+ encoded_data = base64.b64encode('{}:{}'.format(username, password).encode())
# check if the same value is already present for idempotency.
if 'auth' in docker_config['auths'][registry]:
@@ -151,7 +151,7 @@ def write_config(module, docker_config, dest):
conf_file_path = os.path.join(dest, 'config.json')
try:
with open(conf_file_path, 'w') as conf_file:
- json.dump(docker_config, conf_file, indent=8)
+ json.dump(docker_config.decode(), conf_file, indent=8)
except IOError as ioerror:
result = {'failed': True,
'changed': False,
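Context for the b64encode change above: in Python 3, base64.b64encode only accepts bytes, so the "username:password" string has to be encoded first. A minimal sketch of building a docker auth entry this way follows; the registry name and credentials are made-up placeholders, and the sketch simply serializes a plain dict rather than reproducing the module's full flow.

    import base64
    import json

    # Placeholder registry and credentials, for illustration only.
    registry, username, password = "registry.example.com", "example-user", "example-pass"

    auth = base64.b64encode('{}:{}'.format(username, password).encode()).decode()
    docker_config = {'auths': {registry: {'auth': auth}}}
    print(json.dumps(docker_config, indent=8))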
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index a729e8dbd..e14d57702 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -59,7 +59,7 @@ openshift_aws_elb_name_dict:
external: "{{ openshift_aws_elb_basename }}-infra"
openshift_aws_elb_idle_timout: 400
-openshift_aws_elb_scheme: internet-facing
+
openshift_aws_elb_cert_arn: ''
openshift_aws_elb_dict:
@@ -282,8 +282,6 @@ openshift_aws_node_security_groups:
openshift_aws_vpc_tags:
Name: "{{ openshift_aws_vpc_name }}"
-openshift_aws_subnet_az: us-east-1c
-
openshift_aws_vpc:
name: "{{ openshift_aws_vpc_name }}"
cidr: 172.31.0.0/16
@@ -291,11 +289,14 @@ openshift_aws_vpc:
us-east-1:
- cidr: 172.31.48.0/20
az: "us-east-1c"
+ default_az: true
- cidr: 172.31.32.0/20
az: "us-east-1e"
- cidr: 172.31.16.0/20
az: "us-east-1a"
+openshift_aws_subnet_az: "{{ openshift_aws_vpc.subnets[openshift_aws_region] | get_default_az }}"
+
openshift_aws_node_run_bootstrap_startup: True
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 5d371ec7a..6f0028a3d 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -15,7 +15,7 @@
- "{{ subnetout.subnets[0].id }}"
health_check: "{{ openshift_aws_elb_health_check }}"
listeners: "{{ item.value }}"
- scheme: "{{ openshift_aws_elb_scheme }}"
+ scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}"
tags: "{{ openshift_aws_elb_tags }}"
wait: True
register: new_elb
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml
new file mode 100644
index 000000000..90ee40943
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml
@@ -0,0 +1,10 @@
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: bootstrap-autoapprover
+roleRef:
+ kind: ClusterRole
+ name: system:node-bootstrap-autoapprover
+subjects:
+- kind: User
+ name: system:serviceaccount:openshift-infra:bootstrap-autoapprover
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml
new file mode 100644
index 000000000..d8143d047
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml
@@ -0,0 +1,21 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: system:node-bootstrap-autoapprover
+rules:
+- apiGroups:
+ - certificates.k8s.io
+ resources:
+ - certificatesigningrequests
+ verbs:
+ - delete
+ - get
+ - list
+ - watch
+- apiGroups:
+ - certificates.k8s.io
+ resources:
+ - certificatesigningrequests/approval
+ verbs:
+ - create
+ - update
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml
new file mode 100644
index 000000000..e22ce6f34
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml
@@ -0,0 +1,5 @@
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+ name: bootstrap-autoapprover
+ namespace: openshift-infra
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml
new file mode 100644
index 000000000..dbcedb407
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml
@@ -0,0 +1,68 @@
+kind: StatefulSet
+apiVersion: apps/v1beta1
+metadata:
+ name: bootstrap-autoapprover
+ namespace: openshift-infra
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: bootstrap-autoapprover
+ spec:
+ serviceAccountName: bootstrap-autoapprover
+ terminationGracePeriodSeconds: 1
+ containers:
+ - name: signer
+ image: openshift/node:v3.7.0-rc.0
+ command:
+ - /bin/bash
+ - -c
+ args:
+ - |
+ #!/bin/bash
+ set -o errexit
+ set -o nounset
+ set -o pipefail
+
+ unset KUBECONFIG
+ cat <<SCRIPT > /tmp/signer
+ #!/bin/bash
+ #
+ # It approves any CSR that is not yet approved, and deletes any CSR that
+ # expired more than 60 seconds ago.
+ #
+
+ set -o errexit
+ set -o nounset
+ set -o pipefail
+
+ name=\${1}
+ condition=\${2}
+ certificate=\${3}
+ username=\${4}
+
+ # auto approve
+ if [[ -z "\${condition}" && ("\${username}" == "system:serviceaccount:openshift-infra:node-bootstrapper" || "\${username}" == "system:node:"* ) ]]; then
+ oc adm certificate approve "\${name}"
+ exit 0
+ fi
+
+ # check certificate age
+ if [[ -n "\${certificate}" ]]; then
+ text="\$( echo "\${certificate}" | base64 -d - )"
+ if ! echo "\${text}" | openssl x509 -noout; then
+ echo "error: Unable to parse certificate" 2>&1
+ exit 1
+ fi
+ if ! echo "\${text}" | openssl x509 -checkend -60 > /dev/null; then
+ echo "Certificate is expired, deleting"
+ oc delete csr "\${name}"
+ fi
+ exit 0
+ fi
+ SCRIPT
+ chmod u+x /tmp/signer
+
+ exec oc observe csr --maximum-errors=1 --resync-period=10m -a '{.status.conditions[*].type}' -a '{.status.certificate}' -a '{.spec.username}' -- /tmp/signer
diff --git a/roles/openshift_bootstrap_autoapprover/tasks/main.yml b/roles/openshift_bootstrap_autoapprover/tasks/main.yml
new file mode 100644
index 000000000..88e9d08e7
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Copy auto-approver config to host
+ run_once: true
+ copy:
+ src: "{{ item }}"
+ dest: /tmp/openshift-approver/
+ owner: root
+ mode: 0400
+ with_fileglob:
+ - "*.yaml"
+
+- name: Set auto-approver nodeSelector
+ run_once: true
+ yedit:
+ src: "/tmp/openshift-approver/openshift-bootstrap-controller.yaml"
+ key: spec.template.spec.nodeSelector
+ value: "{{ openshift_master_bootstrap_auto_approver_node_selector | default({}) }}"
+ value_type: list
+
+- name: Create auto-approver on cluster
+ run_once: true
+ command: oc apply -f /tmp/openshift-approver/
+
+- name: Remove auto-approver config
+ run_once: true
+ file:
+ path: /tmp/openshift-approver/
+ state: absent
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index ee4048911..395bd304c 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -13,5 +13,11 @@
ini_file:
dest: "{{ openshift.common.config_base }}/cloudprovider/gce.conf"
section: Global
- option: multizone
- value: "true"
+ option: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_items:
+ - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+ - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+ - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+ - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+ - { key: 'multizone', value: 'false' }
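To make the effect of the ini_file task above concrete, here is a small Python 3 sketch (using configparser) of the [Global] section it would render into gce.conf; the project, network, and prefix values are example assumptions, not values taken from any inventory.

    import configparser
    import sys

    conf = configparser.ConfigParser()
    conf['Global'] = {
        'project-id': 'my-gcp-project',   # openshift_gcp_project (example value)
        'network-name': 'my-network',     # openshift_gcp_network_name (example value)
        'node-tags': 'demoocp',           # openshift_gcp_prefix + 'ocp'
        'node-instance-prefix': 'demo',   # openshift_gcp_prefix (example value)
        'multizone': 'false',
    }
    conf.write(sys.stdout)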
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml
new file mode 100644
index 000000000..48d1d4e26
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-job.yaml
@@ -0,0 +1,28 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: cloudforms-backup
+spec:
+ template:
+ metadata:
+ name: cloudforms-backup
+ spec:
+ containers:
+ - name: postgresql
+ image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest
+ command:
+ - "/opt/rh/cfme-container-scripts/backup_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: cloudforms-secrets
+ key: database-url
+ volumeMounts:
+ - name: cfme-backup-vol
+ mountPath: "/backups"
+ volumes:
+ - name: cfme-backup-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-backup
+ restartPolicy: Never
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml
new file mode 100644
index 000000000..92598ce82
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-backup-pvc.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cloudforms-backup
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 15Gi
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml
new file mode 100644
index 000000000..4fe349897
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-backup-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv03
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "/exports/cfme-pv03"
+ server: "<your-nfs-host-here>"
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml
index 250a99b8d..0cdd821b5 100644
--- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml
+++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-db-example.yaml
@@ -1,13 +1,38 @@
apiVersion: v1
-kind: PersistentVolume
+kind: Template
+labels:
+ template: cloudforms-db-pv
metadata:
- name: cfme-pv01
-spec:
- capacity:
- storage: 15Gi
- accessModes:
+ name: cloudforms-db-pv
+ annotations:
+ description: PV Template for CFME PostgreSQL DB
+ tags: PVS, CFME
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cfme-db
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
- ReadWriteOnce
- nfs:
- path: /exports/cfme-pv01
- server: <your-nfs-host-here>
- persistentVolumeReclaimPolicy: Retain
+ nfs:
+ path: "${BASE_PATH}/cfme-db"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for DB
+ required: true
+ description: The size of the CFME DB PV given in Gi
+ value: 15Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml
deleted file mode 100644
index cba9bbe35..000000000
--- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-region-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: cfme-pv02
-spec:
- capacity:
- storage: 5Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/cfme-pv02
- server: <your-nfs-host-here>
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml
index c08c21265..527090ae8 100644
--- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml
+++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-pv-server-example.yaml
@@ -1,13 +1,38 @@
apiVersion: v1
-kind: PersistentVolume
+kind: Template
+labels:
+ template: cloudforms-app-pv
metadata:
- name: cfme-pv03
-spec:
- capacity:
- storage: 5Gi
- accessModes:
+ name: cloudforms-app-pv
+ annotations:
+ description: PV Template for CFME Server
+ tags: PVS, CFME
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cfme-app
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
- ReadWriteOnce
- nfs:
- path: /exports/cfme-pv03
- server: <your-nfs-host-here>
- persistentVolumeReclaimPolicy: Retain
+ nfs:
+ path: "${BASE_PATH}/cfme-app"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for App
+ required: true
+ description: The size of the CFME APP PV given in Gi
+ value: 5Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml
new file mode 100644
index 000000000..7fd4fc2e1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-restore-job.yaml
@@ -0,0 +1,35 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: cloudforms-restore
+spec:
+ template:
+ metadata:
+ name: cloudforms-restore
+ spec:
+ containers:
+ - name: postgresql
+ image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest
+ command:
+ - "/opt/rh/cfme-container-scripts/restore_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: cloudforms-secrets
+ key: database-url
+ - name: BACKUP_VERSION
+ value: latest
+ volumeMounts:
+ - name: cfme-backup-vol
+ mountPath: "/backups"
+ - name: cfme-prod-vol
+ mountPath: "/restore"
+ volumes:
+ - name: cfme-backup-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-backup
+ - name: cfme-prod-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-postgresql
+ restartPolicy: Never
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml
new file mode 100644
index 000000000..d2ece9298
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-scc-sysadmin.yaml
@@ -0,0 +1,38 @@
+allowHostDirVolumePlugin: false
+allowHostIPC: false
+allowHostNetwork: false
+allowHostPID: false
+allowHostPorts: false
+allowPrivilegedContainer: false
+allowedCapabilities:
+apiVersion: v1
+defaultAddCapabilities:
+- SYS_ADMIN
+fsGroup:
+ type: RunAsAny
+groups:
+- system:cluster-admins
+kind: SecurityContextConstraints
+metadata:
+ annotations:
+ kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the SCC required for pods that need to run with systemd and the message bus.
+ creationTimestamp:
+ name: cfme-sysadmin
+priority: 10
+readOnlyRootFilesystem: false
+requiredDropCapabilities:
+- MKNOD
+- SYS_CHROOT
+runAsUser:
+ type: RunAsAny
+seLinuxContext:
+ type: MustRunAs
+supplementalGroups:
+ type: RunAsAny
+users:
+volumes:
+- configMap
+- downwardAPI
+- emptyDir
+- persistentVolumeClaim
+- secret
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml
new file mode 100644
index 000000000..9866c29c3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template-ext-db.yaml
@@ -0,0 +1,956 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms-ext-db
+metadata:
+ name: cloudforms-ext-db
+ annotations:
+ description: CloudForms appliance with persistent storage using an external DB host
+ tags: instant-app,cloudforms,cfme
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ admin-password: "${APPLICATION_ADMIN_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances CloudForms pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: APPLICATION_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: admin-password
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for CloudForms backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Remote database service
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: "${{DATABASE_PORT}}"
+ selector: {}
+- apiVersion: v1
+ kind: Endpoints
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ subsets:
+ - addresses:
+ - ip: "${DATABASE_IP}"
+ ports:
+ - port: "${{DATABASE_PORT}}"
+ name: postgresql
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: cfme-privileged
+ serviceAccountName: cfme-privileged
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP
+ ServerName https://%{REQUEST_HOST}
+
+ ProxyPreserveHost on
+
+ RewriteCond %{REQUEST_URI} ^/ws [NC]
+ RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC]
+ RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC]
+ RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L]
+
+ # For httpd, some ErrorDocuments must be served by the httpd pod
+ RewriteCond %{REQUEST_URI} !^/proxy_pages
+
+ # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod
+ RewriteCond %{REQUEST_URI} !^/saml2
+ RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L]
+ ProxyPassReverse / http://${NAME}/
+
+ # Ensures httpd stdout/stderr are seen by docker logs.
+ ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log"
+ CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common
+ </VirtualHost>
+ authentication.conf: |
+ # Load appropriate authentication configuration files
+ #
+ Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth"
+ configuration-internal-auth: |
+ # Internal authentication
+ #
+ configuration-external-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/http.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-active-directory-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/krb5.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-saml-auth: |
+ LoadModule auth_mellon_module modules/mod_auth_mellon.so
+
+ <Location />
+ MellonEnable "info"
+
+ MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml"
+
+ MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key"
+ MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert"
+ MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml"
+
+ MellonVariable "sp-cookie"
+ MellonSecureCookie On
+ MellonCookiePath "/"
+
+ MellonIdP "IDP"
+
+ MellonEndpointPath "/saml2"
+
+ MellonUser username
+ MellonMergeEnvVars On
+
+ MellonSetEnvNoPrefix "REMOTE_USER" username
+ MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email
+ MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname
+ MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname
+ MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname
+ MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups
+ </Location>
+
+ <Location /saml_login>
+ AuthType "Mellon"
+ MellonEnable "auth"
+ Require valid-user
+ </Location>
+
+ Include "conf.d/external-auth-remote-user-conf"
+ external-auth-load-modules-conf: |
+ LoadModule authnz_pam_module modules/mod_authnz_pam.so
+ LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so
+ LoadModule lookup_identity_module modules/mod_lookup_identity.so
+ LoadModule auth_kerb_module modules/mod_auth_kerb.so
+ external-auth-login-form-conf: |
+ <Location /dashboard/external_authenticate>
+ InterceptFormPAMService httpd-auth
+ InterceptFormLogin user_name
+ InterceptFormPassword user_password
+ InterceptFormLoginSkip admin
+ InterceptFormClearRemoteUserForSkipped on
+ </Location>
+ external-auth-application-api-conf: |
+ <LocationMatch ^/api>
+ SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in
+ SetEnvIf X-Auth-Token '^.+$' let_api_token_in
+ SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in
+
+ AuthType Basic
+ AuthName "External Authentication (httpd) for API"
+ AuthBasicProvider PAM
+
+ AuthPAMService httpd-auth
+ Require valid-user
+ Order Allow,Deny
+ Allow from env=let_admin_in
+ Allow from env=let_api_token_in
+ Allow from env=let_sys_token_in
+ Satisfy Any
+ </LocationMatch>
+ external-auth-lookup-user-details-conf: |
+ <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api>
+ LookupUserAttr mail REMOTE_USER_EMAIL
+ LookupUserAttr givenname REMOTE_USER_FIRSTNAME
+ LookupUserAttr sn REMOTE_USER_LASTNAME
+ LookupUserAttr displayname REMOTE_USER_FULLNAME
+ LookupUserAttr domainname REMOTE_USER_DOMAIN
+
+ LookupUserGroups REMOTE_USER_GROUPS ":"
+ LookupDbusTimeout 5000
+ </LocationMatch>
+ external-auth-remote-user-conf: |
+ RequestHeader unset X_REMOTE_USER
+
+ RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER
+ RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR
+ RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL
+ RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME
+ RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME
+ RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME
+ RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS
+ RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-kerberos-realms: undefined
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_DBUS_API_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server dbus api
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http-dbus-api
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 8080
+ protocol: TCP
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ - name: HTTPD_AUTH_KERBEROS_REALMS
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-kerberos-realms
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: cfme-httpd
+ serviceAccountName: cfme-httpd
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: cloudforms
+- name: V2_KEY
+ displayName: CloudForms Encryption Key
+ required: true
+ description: Encryption Key for CloudForms Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_IP
+ displayName: PostgreSQL Server IP
+ required: true
+ description: PostgreSQL external server IP used to configure service.
+ value: ''
+- name: DATABASE_PORT
+ displayName: PostgreSQL Server Port
+ required: true
+ description: PostgreSQL external server port used to configure service.
+ value: '5432'
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for application.
+ value: '0'
+- name: APPLICATION_ADMIN_PASSWORD
+ displayName: Application Admin Password
+ required: true
+ description: Admin password that will be set on the application.
+ value: smartvm
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+ description: The database to be used by the Ansible container
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible admin User password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Frontend Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service. If left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_DBUS_API_SERVICE_NAME
+ required: true
+ displayName: Apache httpd DBus API Service Name
+ description: The name of httpd dbus api service.
+ value: httpd-dbus-api
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
diff --git a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml
index 3bc6c5813..5c757b6c2 100644
--- a/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.7/cfme-templates/cfme-template.yaml
@@ -5,17 +5,308 @@ labels:
metadata:
name: cloudforms
annotations:
- description: "CloudForms appliance with persistent storage"
- tags: "instant-app,cloudforms,cfme"
- iconClass: "icon-rails"
+ description: CloudForms appliance with persistent storage
+ tags: instant-app,cloudforms,cfme
+ iconClass: icon-rails
objects:
- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ admin-password: "${APPLICATION_ADMIN_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ data:
+ 01_miq_overrides.conf: |
+ #------------------------------------------------------------------------------
+ # CONNECTIONS AND AUTHENTICATION
+ #------------------------------------------------------------------------------
+
+ tcp_keepalives_count = 9
+ tcp_keepalives_idle = 3
+ tcp_keepalives_interval = 75
+
+ #------------------------------------------------------------------------------
+ # RESOURCE USAGE (except WAL)
+ #------------------------------------------------------------------------------
+
+ shared_preload_libraries = 'pglogical,repmgr_funcs'
+ max_worker_processes = 10
+
+ #------------------------------------------------------------------------------
+ # WRITE AHEAD LOG
+ #------------------------------------------------------------------------------
+
+ wal_level = 'logical'
+ wal_log_hints = on
+ wal_buffers = 16MB
+ checkpoint_completion_target = 0.9
+
+ #------------------------------------------------------------------------------
+ # REPLICATION
+ #------------------------------------------------------------------------------
+
+ max_wal_senders = 10
+ wal_sender_timeout = 0
+ max_replication_slots = 10
+ hot_standby = on
+
+ #------------------------------------------------------------------------------
+ # ERROR REPORTING AND LOGGING
+ #------------------------------------------------------------------------------
+
+ log_filename = 'postgresql.log'
+ log_rotation_age = 0
+ log_min_duration_statement = 5000
+ log_connections = on
+ log_disconnections = on
+ log_line_prefix = '%t:%r:%c:%u@%d:[%p]:'
+ log_lock_waits = on
+
+ #------------------------------------------------------------------------------
+ # AUTOVACUUM PARAMETERS
+ #------------------------------------------------------------------------------
+
+ log_autovacuum_min_duration = 0
+ autovacuum_naptime = 5min
+ autovacuum_vacuum_threshold = 500
+ autovacuum_analyze_threshold = 500
+ autovacuum_vacuum_scale_factor = 0.05
+
+ #------------------------------------------------------------------------------
+ # LOCK MANAGEMENT
+ #------------------------------------------------------------------------------
+
+ deadlock_timeout = 5s
+
+ #------------------------------------------------------------------------------
+ # VERSION/PLATFORM COMPATIBILITY
+ #------------------------------------------------------------------------------
+
+ escape_string_warning = off
+ standard_conforming_strings = off
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP
+ ServerName https://%{REQUEST_HOST}
+
+ ProxyPreserveHost on
+
+ RewriteCond %{REQUEST_URI} ^/ws [NC]
+ RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC]
+ RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC]
+ RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L]
+
+ # For httpd, some ErrorDocuments must be served by the httpd pod
+ RewriteCond %{REQUEST_URI} !^/proxy_pages
+
+ # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod
+ RewriteCond %{REQUEST_URI} !^/saml2
+ RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L]
+ ProxyPassReverse / http://${NAME}/
+
+ # Ensures httpd stdout/stderr are seen by docker logs.
+ ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log"
+ CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common
+ </VirtualHost>
+ authentication.conf: |
+ # Load appropriate authentication configuration files
+ #
+ Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth"
+ configuration-internal-auth: |
+ # Internal authentication
+ #
+ configuration-external-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/http.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-active-directory-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/krb5.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-saml-auth: |
+ LoadModule auth_mellon_module modules/mod_auth_mellon.so
+
+ <Location />
+ MellonEnable "info"
+
+ MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml"
+
+ MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key"
+ MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert"
+ MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml"
+
+ MellonVariable "sp-cookie"
+ MellonSecureCookie On
+ MellonCookiePath "/"
+
+ MellonIdP "IDP"
+
+ MellonEndpointPath "/saml2"
+
+ MellonUser username
+ MellonMergeEnvVars On
+
+ MellonSetEnvNoPrefix "REMOTE_USER" username
+ MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email
+ MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname
+ MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname
+ MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname
+ MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups
+ </Location>
+
+ <Location /saml_login>
+ AuthType "Mellon"
+ MellonEnable "auth"
+ Require valid-user
+ </Location>
+
+ Include "conf.d/external-auth-remote-user-conf"
+ external-auth-load-modules-conf: |
+ LoadModule authnz_pam_module modules/mod_authnz_pam.so
+ LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so
+ LoadModule lookup_identity_module modules/mod_lookup_identity.so
+ LoadModule auth_kerb_module modules/mod_auth_kerb.so
+ external-auth-login-form-conf: |
+ <Location /dashboard/external_authenticate>
+ InterceptFormPAMService httpd-auth
+ InterceptFormLogin user_name
+ InterceptFormPassword user_password
+ InterceptFormLoginSkip admin
+ InterceptFormClearRemoteUserForSkipped on
+ </Location>
+ external-auth-application-api-conf: |
+ <LocationMatch ^/api>
+ SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in
+ SetEnvIf X-Auth-Token '^.+$' let_api_token_in
+ SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in
+
+ AuthType Basic
+ AuthName "External Authentication (httpd) for API"
+ AuthBasicProvider PAM
+
+ AuthPAMService httpd-auth
+ Require valid-user
+ Order Allow,Deny
+ Allow from env=let_admin_in
+ Allow from env=let_api_token_in
+ Allow from env=let_sys_token_in
+ Satisfy Any
+ </LocationMatch>
+ external-auth-lookup-user-details-conf: |
+ <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api>
+ LookupUserAttr mail REMOTE_USER_EMAIL
+ LookupUserAttr givenname REMOTE_USER_FIRSTNAME
+ LookupUserAttr sn REMOTE_USER_LASTNAME
+ LookupUserAttr displayname REMOTE_USER_FULLNAME
+ LookupUserAttr domainname REMOTE_USER_DOMAIN
+
+ LookupUserGroups REMOTE_USER_GROUPS ":"
+ LookupDbusTimeout 5000
+ </LocationMatch>
+ external-auth-remote-user-conf: |
+ RequestHeader unset X_REMOTE_USER
+
+ RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER
+ RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR
+ RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL
+ RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME
+ RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME
+ RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME
+ RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS
+ RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-kerberos-realms: undefined
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
kind: Service
metadata:
annotations:
- description: "Exposes and load balances CloudForms pods"
+ description: Exposes and load balances CloudForms pods
service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
- name: ${NAME}
+ name: "${NAME}"
spec:
clusterIP: None
ports:
@@ -23,141 +314,97 @@ objects:
port: 80
protocol: TCP
targetPort: 80
- - name: https
- port: 443
- protocol: TCP
- targetPort: 443
selector:
- name: ${NAME}
+ name: "${NAME}"
- apiVersion: v1
kind: Route
metadata:
- name: ${NAME}
+ name: "${HTTPD_SERVICE_NAME}"
spec:
- host: ${APPLICATION_DOMAIN}
+ host: "${APPLICATION_DOMAIN}"
port:
- targetPort: https
+ targetPort: http
tls:
- termination: passthrough
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
to:
kind: Service
- name: ${NAME}
-- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-app
- annotations:
- description: "Keeps track of changes in the CloudForms app image"
- spec:
- dockerImageRepository: "${APPLICATION_IMG_NAME}"
-- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-postgresql
- annotations:
- description: "Keeps track of changes in the CloudForms postgresql image"
- spec:
- dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
-- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-memcached
- annotations:
- description: "Keeps track of changes in the CloudForms memcached image"
- spec:
- dockerImageRepository: "${MEMCACHED_IMG_NAME}"
+ name: "${HTTPD_SERVICE_NAME}"
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: "${NAME}-${DATABASE_SERVICE_NAME}"
spec:
accessModes:
- - ReadWriteOnce
+ - ReadWriteOnce
resources:
requests:
- storage: ${DATABASE_VOLUME_CAPACITY}
-- apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: "${NAME}-region"
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
+ storage: "${DATABASE_VOLUME_CAPACITY}"
- apiVersion: apps/v1beta1
- kind: "StatefulSet"
+ kind: StatefulSet
metadata:
- name: ${NAME}
+ name: "${NAME}"
annotations:
- description: "Defines how to deploy the CloudForms appliance"
+ description: Defines how to deploy the CloudForms appliance
spec:
serviceName: "${NAME}"
- replicas: 1
+ replicas: "${APPLICATION_REPLICA_COUNT}"
template:
metadata:
labels:
- name: ${NAME}
- name: ${NAME}
+ name: "${NAME}"
+ name: "${NAME}"
spec:
containers:
- name: cloudforms
- image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
+ image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
livenessProbe:
- tcpSocket:
- port: 443
+ exec:
+ command:
+ - pidof
+ - MIQ Server
initialDelaySeconds: 480
timeoutSeconds: 3
readinessProbe:
- httpGet:
- path: /
- port: 443
- scheme: HTTPS
+ tcpSocket:
+ port: 80
initialDelaySeconds: 200
timeoutSeconds: 3
ports:
- containerPort: 80
protocol: TCP
- - containerPort: 443
- protocol: TCP
- securityContext:
- privileged: true
volumeMounts:
- -
- name: "${NAME}-server"
- mountPath: "/persistent"
- -
- name: "${NAME}-region"
- mountPath: "/persistent-region"
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
env:
- -
- name: "APPLICATION_INIT_DELAY"
- value: "${APPLICATION_INIT_DELAY}"
- -
- name: "DATABASE_SERVICE_NAME"
- value: "${DATABASE_SERVICE_NAME}"
- -
- name: "DATABASE_REGION"
- value: "${DATABASE_REGION}"
- -
- name: "MEMCACHED_SERVICE_NAME"
- value: "${MEMCACHED_SERVICE_NAME}"
- -
- name: "POSTGRESQL_USER"
- value: "${DATABASE_USER}"
- -
- name: "POSTGRESQL_PASSWORD"
- value: "${DATABASE_PASSWORD}"
- -
- name: "POSTGRESQL_DATABASE"
- value: "${DATABASE_NAME}"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- value: "${POSTGRESQL_MAX_CONNECTIONS}"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- value: "${POSTGRESQL_SHARED_BUFFERS}"
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: APPLICATION_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: admin-password
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
resources:
requests:
memory: "${APPLICATION_MEM_REQ}"
@@ -168,59 +415,128 @@ objects:
preStop:
exec:
command:
- - /opt/rh/cfme-container-scripts/sync-pv-data
- volumes:
- -
- name: "${NAME}-region"
- persistentVolumeClaim:
- claimName: ${NAME}-region
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
volumeClaimTemplates:
- - metadata:
- name: "${NAME}-server"
- annotations:
- # Uncomment this if using dynamic volume provisioning.
- # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
- # volume.alpha.kubernetes.io/storage-class: anything
- spec:
- accessModes: [ ReadWriteOnce ]
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for CloudForms backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
resources:
requests:
- storage: "${APPLICATION_VOLUME_CAPACITY}"
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
- apiVersion: v1
- kind: "Service"
+ kind: Service
metadata:
name: "${MEMCACHED_SERVICE_NAME}"
annotations:
- description: "Exposes the memcached server"
+ description: Exposes the memcached server
spec:
ports:
- -
- name: "memcached"
- port: 11211
- targetPort: 11211
+ - name: memcached
+ port: 11211
+ targetPort: 11211
selector:
name: "${MEMCACHED_SERVICE_NAME}"
- apiVersion: v1
- kind: "DeploymentConfig"
+ kind: DeploymentConfig
metadata:
name: "${MEMCACHED_SERVICE_NAME}"
annotations:
- description: "Defines how to deploy memcached"
+ description: Defines how to deploy memcached
spec:
strategy:
- type: "Recreate"
+ type: Recreate
triggers:
- -
- type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "memcached"
- from:
- kind: "ImageStreamTag"
- name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
- -
- type: "ConfigChange"
+ - type: ConfigChange
replicas: 1
selector:
name: "${MEMCACHED_SERVICE_NAME}"
@@ -232,74 +548,58 @@ objects:
spec:
volumes: []
containers:
- -
- name: "memcached"
- image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
- ports:
- -
- containerPort: 11211
- readinessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 5
- tcpSocket:
- port: 11211
- livenessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 30
- tcpSocket:
- port: 11211
- volumeMounts: []
- env:
- -
- name: "MEMCACHED_MAX_MEMORY"
- value: "${MEMCACHED_MAX_MEMORY}"
- -
- name: "MEMCACHED_MAX_CONNECTIONS"
- value: "${MEMCACHED_MAX_CONNECTIONS}"
- -
- name: "MEMCACHED_SLAB_PAGE_SIZE"
- value: "${MEMCACHED_SLAB_PAGE_SIZE}"
- resources:
- requests:
- memory: "${MEMCACHED_MEM_REQ}"
- cpu: "${MEMCACHED_CPU_REQ}"
- limits:
- memory: "${MEMCACHED_MEM_LIMIT}"
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
- apiVersion: v1
- kind: "Service"
+ kind: Service
metadata:
name: "${DATABASE_SERVICE_NAME}"
annotations:
- description: "Exposes the database server"
+ description: Exposes the database server
spec:
ports:
- -
- name: "postgresql"
- port: 5432
- targetPort: 5432
+ - name: postgresql
+ port: 5432
+ targetPort: 5432
selector:
name: "${DATABASE_SERVICE_NAME}"
- apiVersion: v1
- kind: "DeploymentConfig"
+ kind: DeploymentConfig
metadata:
name: "${DATABASE_SERVICE_NAME}"
annotations:
- description: "Defines how to deploy the database"
+ description: Defines how to deploy the database
spec:
strategy:
- type: "Recreate"
+ type: Recreate
triggers:
- -
- type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "postgresql"
- from:
- kind: "ImageStreamTag"
- name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
- -
- type: "ConfigChange"
+ - type: ConfigChange
replicas: 1
selector:
name: "${DATABASE_SERVICE_NAME}"
@@ -310,236 +610,524 @@ objects:
name: "${DATABASE_SERVICE_NAME}"
spec:
volumes:
- -
- name: "cfme-pgdb-volume"
- persistentVolumeClaim:
- claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ - name: cfme-pgdb-volume
+ persistentVolumeClaim:
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ - name: cfme-pg-configs
+ configMap:
+ name: "${DATABASE_SERVICE_NAME}-configs"
containers:
- -
- name: "postgresql"
- image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
- ports:
- -
- containerPort: 5432
- readinessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 15
+ - name: postgresql
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
+ ports:
+ - containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ - name: cfme-pgdb-volume
+ mountPath: "/var/lib/pgsql/data"
+ - name: cfme-pg-configs
+ mountPath: "${POSTGRESQL_CONFIG_DIR}"
+ env:
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${DATABASE_NAME}"
+ - name: POSTGRESQL_MAX_CONNECTIONS
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ - name: POSTGRESQL_SHARED_BUFFERS
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ - name: POSTGRESQL_CONFIG_DIR
+ value: "${POSTGRESQL_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
+ limits:
+ memory: "${POSTGRESQL_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: cfme-privileged
+ serviceAccountName: cfme-privileged
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_DBUS_API_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server DBus API
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http-dbus-api
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 8080
+ protocol: TCP
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ - name: HTTPD_AUTH_KERBEROS_REALMS
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-kerberos-realms
+ lifecycle:
+ postStart:
exec:
command:
- - "/bin/sh"
- - "-i"
- - "-c"
- - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
- livenessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 60
- tcpSocket:
- port: 5432
- volumeMounts:
- -
- name: "cfme-pgdb-volume"
- mountPath: "/var/lib/pgsql/data"
- env:
- -
- name: "POSTGRESQL_USER"
- value: "${DATABASE_USER}"
- -
- name: "POSTGRESQL_PASSWORD"
- value: "${DATABASE_PASSWORD}"
- -
- name: "POSTGRESQL_DATABASE"
- value: "${DATABASE_NAME}"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- value: "${POSTGRESQL_MAX_CONNECTIONS}"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- value: "${POSTGRESQL_SHARED_BUFFERS}"
- resources:
- requests:
- memory: "${POSTGRESQL_MEM_REQ}"
- cpu: "${POSTGRESQL_CPU_REQ}"
- limits:
- memory: "${POSTGRESQL_MEM_LIMIT}"
-
+ - "/usr/bin/save-container-environment"
+ serviceAccount: cfme-httpd
+ serviceAccountName: cfme-httpd
parameters:
- -
- name: "NAME"
- displayName: Name
- required: true
- description: "The name assigned to all of the frontend objects defined in this template."
- value: cloudforms
- -
- name: "DATABASE_SERVICE_NAME"
- displayName: "PostgreSQL Service Name"
- required: true
- description: "The name of the OpenShift Service exposed for the PostgreSQL container."
- value: "postgresql"
- -
- name: "DATABASE_USER"
- displayName: "PostgreSQL User"
- required: true
- description: "PostgreSQL user that will access the database."
- value: "root"
- -
- name: "DATABASE_PASSWORD"
- displayName: "PostgreSQL Password"
- required: true
- description: "Password for the PostgreSQL user."
- value: "smartvm"
- -
- name: "DATABASE_NAME"
- required: true
- displayName: "PostgreSQL Database Name"
- description: "Name of the PostgreSQL database accessed."
- value: "vmdb_production"
- -
- name: "DATABASE_REGION"
- required: true
- displayName: "Application Database Region"
- description: "Database region that will be used for application."
- value: "0"
- -
- name: "MEMCACHED_SERVICE_NAME"
- required: true
- displayName: "Memcached Service Name"
- description: "The name of the OpenShift Service exposed for the Memcached container."
- value: "memcached"
- -
- name: "MEMCACHED_MAX_MEMORY"
- displayName: "Memcached Max Memory"
- description: "Memcached maximum memory for memcached object storage in MB."
- value: "64"
- -
- name: "MEMCACHED_MAX_CONNECTIONS"
- displayName: "Memcached Max Connections"
- description: "Memcached maximum number of connections allowed."
- value: "1024"
- -
- name: "MEMCACHED_SLAB_PAGE_SIZE"
- displayName: "Memcached Slab Page Size"
- description: "Memcached size of each slab page."
- value: "1m"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- displayName: "PostgreSQL Max Connections"
- description: "PostgreSQL maximum number of database connections allowed."
- value: "100"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- displayName: "PostgreSQL Shared Buffer Amount"
- description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
- value: "256MB"
- -
- name: "APPLICATION_CPU_REQ"
- displayName: "Application Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
- value: "1000m"
- -
- name: "POSTGRESQL_CPU_REQ"
- displayName: "PostgreSQL Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
- value: "500m"
- -
- name: "MEMCACHED_CPU_REQ"
- displayName: "Memcached Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
- value: "200m"
- -
- name: "APPLICATION_MEM_REQ"
- displayName: "Application Min RAM Requested"
- required: true
- description: "Minimum amount of memory the Application container will need."
- value: "6144Mi"
- -
- name: "POSTGRESQL_MEM_REQ"
- displayName: "PostgreSQL Min RAM Requested"
- required: true
- description: "Minimum amount of memory the PostgreSQL container will need."
- value: "1024Mi"
- -
- name: "MEMCACHED_MEM_REQ"
- displayName: "Memcached Min RAM Requested"
- required: true
- description: "Minimum amount of memory the Memcached container will need."
- value: "64Mi"
- -
- name: "APPLICATION_MEM_LIMIT"
- displayName: "Application Max RAM Limit"
- required: true
- description: "Maximum amount of memory the Application container can consume."
- value: "16384Mi"
- -
- name: "POSTGRESQL_MEM_LIMIT"
- displayName: "PostgreSQL Max RAM Limit"
- required: true
- description: "Maximum amount of memory the PostgreSQL container can consume."
- value: "8192Mi"
- -
- name: "MEMCACHED_MEM_LIMIT"
- displayName: "Memcached Max RAM Limit"
- required: true
- description: "Maximum amount of memory the Memcached container can consume."
- value: "256Mi"
- -
- name: "POSTGRESQL_IMG_NAME"
- displayName: "PostgreSQL Image Name"
- description: "This is the PostgreSQL image name requested to deploy."
- value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql"
- -
- name: "POSTGRESQL_IMG_TAG"
- displayName: "PostgreSQL Image Tag"
- description: "This is the PostgreSQL image tag/version requested to deploy."
- value: "latest"
- -
- name: "MEMCACHED_IMG_NAME"
- displayName: "Memcached Image Name"
- description: "This is the Memcached image name requested to deploy."
- value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached"
- -
- name: "MEMCACHED_IMG_TAG"
- displayName: "Memcached Image Tag"
- description: "This is the Memcached image tag/version requested to deploy."
- value: "latest"
- -
- name: "APPLICATION_IMG_NAME"
- displayName: "Application Image Name"
- description: "This is the Application image name requested to deploy."
- value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app"
- -
- name: "APPLICATION_IMG_TAG"
- displayName: "Application Image Tag"
- description: "This is the Application image tag/version requested to deploy."
- value: "latest"
- -
- name: "APPLICATION_DOMAIN"
- displayName: "Application Hostname"
- description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
- value: ""
- -
- name: "APPLICATION_INIT_DELAY"
- displayName: "Application Init Delay"
- required: true
- description: "Delay in seconds before we attempt to initialize the application."
- value: "15"
- -
- name: "APPLICATION_VOLUME_CAPACITY"
- displayName: "Application Volume Capacity"
- required: true
- description: "Volume space available for application data."
- value: "5Gi"
- -
- name: "APPLICATION_REGION_VOLUME_CAPACITY"
- displayName: "Application Region Volume Capacity"
- required: true
- description: "Volume space available for region application data."
- value: "5Gi"
- -
- name: "DATABASE_VOLUME_CAPACITY"
- displayName: "Database Volume Capacity"
- required: true
- description: "Volume space available for database."
- value: "15Gi"
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: cloudforms
+- name: V2_KEY
+ displayName: CloudForms Encryption Key
+ required: true
+ description: Encryption key for CloudForms passwords.
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for the application.
+ value: '0'
+- name: APPLICATION_ADMIN_PASSWORD
+ displayName: Application Admin Password
+ required: true
+ description: Admin password that will be set on the application.
+ value: smartvm
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL Database Name
+ required: true
+ description: The database to be used by the Ansible container.
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: POSTGRESQL_CONFIG_DIR
+ displayName: PostgreSQL Configuration Overrides
+ description: Directory used to store PostgreSQL configuration overrides.
+ value: "/var/lib/pgsql/conf.d"
+- name: POSTGRESQL_MAX_CONNECTIONS
+ displayName: PostgreSQL Max Connections
+ description: PostgreSQL maximum number of database connections allowed.
+ value: '1000'
+- name: POSTGRESQL_SHARED_BUFFERS
+ displayName: PostgreSQL Shared Buffer Amount
+ description: Amount of memory dedicated for PostgreSQL shared memory buffers.
+ value: 1GB
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible Admin User Password
+ required: true
+ description: The password for the Ansible container admin user.
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container.
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server.
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server.
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: POSTGRESQL_CPU_REQ
+ displayName: PostgreSQL Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores).
+ value: 500m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: POSTGRESQL_MEM_REQ
+ displayName: PostgreSQL Min RAM Requested
+ required: true
+ description: Minimum amount of memory the PostgreSQL container will need.
+ value: 4Gi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: POSTGRESQL_MEM_LIMIT
+ displayName: PostgreSQL Max RAM Limit
+ required: true
+ description: Maximum amount of memory the PostgreSQL container can consume.
+ value: 8Gi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: POSTGRESQL_IMG_NAME
+ displayName: PostgreSQL Image Name
+ description: This is the PostgreSQL image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql
+- name: POSTGRESQL_IMG_TAG
+ displayName: PostgreSQL Image Tag
+ description: This is the PostgreSQL image tag/version requested to deploy.
+ value: latest
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Frontend Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service. If left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: DATABASE_VOLUME_CAPACITY
+ displayName: Database Volume Capacity
+ required: true
+ description: Volume space available for database.
+ value: 15Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_DBUS_API_SERVICE_NAME
+ required: true
+ displayName: Apache httpd DBus API Service Name
+ description: The name of the httpd DBus API service.
+ value: httpd-dbus-api
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml
new file mode 100644
index 000000000..48d1d4e26
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-job.yaml
@@ -0,0 +1,28 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: cloudforms-backup
+spec:
+ template:
+ metadata:
+ name: cloudforms-backup
+ spec:
+ containers:
+ - name: postgresql
+ image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest
+ command:
+ - "/opt/rh/cfme-container-scripts/backup_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: cloudforms-secrets
+ key: database-url
+ volumeMounts:
+ - name: cfme-backup-vol
+ mountPath: "/backups"
+ volumes:
+ - name: cfme-backup-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-backup
+ restartPolicy: Never
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml
new file mode 100644
index 000000000..92598ce82
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-backup-pvc.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cloudforms-backup
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 15Gi
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml
new file mode 100644
index 000000000..4fe349897
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-backup-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv03
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "/exports/cfme-pv03"
+ server: "<your-nfs-host-here>"
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml
index 250a99b8d..0cdd821b5 100644
--- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-db-example.yaml
@@ -1,13 +1,38 @@
apiVersion: v1
-kind: PersistentVolume
+kind: Template
+labels:
+ template: cloudforms-db-pv
metadata:
- name: cfme-pv01
-spec:
- capacity:
- storage: 15Gi
- accessModes:
+ name: cloudforms-db-pv
+ annotations:
+ description: PV Template for CFME PostgreSQL DB
+ tags: PVS, CFME
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cfme-db
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
- ReadWriteOnce
- nfs:
- path: /exports/cfme-pv01
- server: <your-nfs-host-here>
- persistentVolumeReclaimPolicy: Retain
+ nfs:
+ path: "${BASE_PATH}/cfme-db"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for DB
+ required: true
+ description: The size of the CFME DB PV given in Gi
+ value: 15Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml
deleted file mode 100644
index cba9bbe35..000000000
--- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-region-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: cfme-pv02
-spec:
- capacity:
- storage: 5Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/cfme-pv02
- server: <your-nfs-host-here>
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml
index c08c21265..527090ae8 100644
--- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-pv-server-example.yaml
@@ -1,13 +1,38 @@
apiVersion: v1
-kind: PersistentVolume
+kind: Template
+labels:
+ template: cloudforms-app-pv
metadata:
- name: cfme-pv03
-spec:
- capacity:
- storage: 5Gi
- accessModes:
+ name: cloudforms-app-pv
+ annotations:
+ description: PV Template for CFME Server
+ tags: PVS, CFME
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cfme-app
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
- ReadWriteOnce
- nfs:
- path: /exports/cfme-pv03
- server: <your-nfs-host-here>
- persistentVolumeReclaimPolicy: Retain
+ nfs:
+ path: "${BASE_PATH}/cfme-app"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for App
+ required: true
+ description: The size of the CFME APP PV given in Gi
+ value: 5Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml
new file mode 100644
index 000000000..7fd4fc2e1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-restore-job.yaml
@@ -0,0 +1,35 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: cloudforms-restore
+spec:
+ template:
+ metadata:
+ name: cloudforms-restore
+ spec:
+ containers:
+ - name: postgresql
+ image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest
+ command:
+ - "/opt/rh/cfme-container-scripts/restore_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: cloudforms-secrets
+ key: database-url
+ - name: BACKUP_VERSION
+ value: latest
+ volumeMounts:
+ - name: cfme-backup-vol
+ mountPath: "/backups"
+ - name: cfme-prod-vol
+ mountPath: "/restore"
+ volumes:
+ - name: cfme-backup-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-backup
+ - name: cfme-prod-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-postgresql
+ restartPolicy: Never
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml
new file mode 100644
index 000000000..d2ece9298
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-scc-sysadmin.yaml
@@ -0,0 +1,38 @@
+allowHostDirVolumePlugin: false
+allowHostIPC: false
+allowHostNetwork: false
+allowHostPID: false
+allowHostPorts: false
+allowPrivilegedContainer: false
+allowedCapabilities:
+apiVersion: v1
+defaultAddCapabilities:
+- SYS_ADMIN
+fsGroup:
+ type: RunAsAny
+groups:
+- system:cluster-admins
+kind: SecurityContextConstraints
+metadata:
+ annotations:
+ kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the required SCC for pods that need to run with systemd and the message bus.
+ creationTimestamp:
+ name: cfme-sysadmin
+priority: 10
+readOnlyRootFilesystem: false
+requiredDropCapabilities:
+- MKNOD
+- SYS_CHROOT
+runAsUser:
+ type: RunAsAny
+seLinuxContext:
+ type: MustRunAs
+supplementalGroups:
+ type: RunAsAny
+users:
+volumes:
+- configMap
+- downwardAPI
+- emptyDir
+- persistentVolumeClaim
+- secret
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml
new file mode 100644
index 000000000..9866c29c3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template-ext-db.yaml
@@ -0,0 +1,956 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms-ext-db
+metadata:
+ name: cloudforms-ext-db
+ annotations:
+ description: CloudForms appliance with persistent storage using an external DB host
+ tags: instant-app,cloudforms,cfme
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ admin-password: "${APPLICATION_ADMIN_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances CloudForms pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: APPLICATION_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: admin-password
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for CloudForms backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Remote database service
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: "${{DATABASE_PORT}}"
+ selector: {}
+- apiVersion: v1
+ kind: Endpoints
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ subsets:
+ - addresses:
+ - ip: "${DATABASE_IP}"
+ ports:
+ - port: "${{DATABASE_PORT}}"
+ name: postgresql
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: cfme-privileged
+ serviceAccountName: cfme-privileged
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP
+ ServerName https://%{REQUEST_HOST}
+
+ ProxyPreserveHost on
+
+ RewriteCond %{REQUEST_URI} ^/ws [NC]
+ RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC]
+ RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC]
+ RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L]
+
+ # For httpd, some ErrorDocuments must be served by the httpd pod
+ RewriteCond %{REQUEST_URI} !^/proxy_pages
+
+ # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod
+ RewriteCond %{REQUEST_URI} !^/saml2
+ RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L]
+ ProxyPassReverse / http://${NAME}/
+
+ # Ensures httpd stdout/stderr are seen by docker logs.
+ ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log"
+ CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common
+ </VirtualHost>
+ authentication.conf: |
+ # Load appropriate authentication configuration files
+ #
+ Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth"
+ configuration-internal-auth: |
+ # Internal authentication
+ #
+ configuration-external-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/http.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-active-directory-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/krb5.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-saml-auth: |
+ LoadModule auth_mellon_module modules/mod_auth_mellon.so
+
+ <Location />
+ MellonEnable "info"
+
+ MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml"
+
+ MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key"
+ MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert"
+ MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml"
+
+ MellonVariable "sp-cookie"
+ MellonSecureCookie On
+ MellonCookiePath "/"
+
+ MellonIdP "IDP"
+
+ MellonEndpointPath "/saml2"
+
+ MellonUser username
+ MellonMergeEnvVars On
+
+ MellonSetEnvNoPrefix "REMOTE_USER" username
+ MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email
+ MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname
+ MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname
+ MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname
+ MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups
+ </Location>
+
+ <Location /saml_login>
+ AuthType "Mellon"
+ MellonEnable "auth"
+ Require valid-user
+ </Location>
+
+ Include "conf.d/external-auth-remote-user-conf"
+ external-auth-load-modules-conf: |
+ LoadModule authnz_pam_module modules/mod_authnz_pam.so
+ LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so
+ LoadModule lookup_identity_module modules/mod_lookup_identity.so
+ LoadModule auth_kerb_module modules/mod_auth_kerb.so
+ external-auth-login-form-conf: |
+ <Location /dashboard/external_authenticate>
+ InterceptFormPAMService httpd-auth
+ InterceptFormLogin user_name
+ InterceptFormPassword user_password
+ InterceptFormLoginSkip admin
+ InterceptFormClearRemoteUserForSkipped on
+ </Location>
+ external-auth-application-api-conf: |
+ <LocationMatch ^/api>
+ SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in
+ SetEnvIf X-Auth-Token '^.+$' let_api_token_in
+ SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in
+
+ AuthType Basic
+ AuthName "External Authentication (httpd) for API"
+ AuthBasicProvider PAM
+
+ AuthPAMService httpd-auth
+ Require valid-user
+ Order Allow,Deny
+ Allow from env=let_admin_in
+ Allow from env=let_api_token_in
+ Allow from env=let_sys_token_in
+ Satisfy Any
+ </LocationMatch>
+ external-auth-lookup-user-details-conf: |
+ <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api>
+ LookupUserAttr mail REMOTE_USER_EMAIL
+ LookupUserAttr givenname REMOTE_USER_FIRSTNAME
+ LookupUserAttr sn REMOTE_USER_LASTNAME
+ LookupUserAttr displayname REMOTE_USER_FULLNAME
+ LookupUserAttr domainname REMOTE_USER_DOMAIN
+
+ LookupUserGroups REMOTE_USER_GROUPS ":"
+ LookupDbusTimeout 5000
+ </LocationMatch>
+ external-auth-remote-user-conf: |
+ RequestHeader unset X_REMOTE_USER
+
+ RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER
+ RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR
+ RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL
+ RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME
+ RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME
+ RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME
+ RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS
+ RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-kerberos-realms: undefined
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_DBUS_API_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server dbus api
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http-dbus-api
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 8080
+ protocol: TCP
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ - name: HTTPD_AUTH_KERBEROS_REALMS
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-kerberos-realms
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: cfme-httpd
+ serviceAccountName: cfme-httpd
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: cloudforms
+- name: V2_KEY
+ displayName: CloudForms Encryption Key
+ required: true
+ description: Encryption Key for CloudForms Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_IP
+ displayName: PostgreSQL Server IP
+ required: true
+ description: PostgreSQL external server IP used to configure the service.
+ value: ''
+- name: DATABASE_PORT
+ displayName: PostgreSQL Server Port
+ required: true
+ description: PostgreSQL external server port used to configure the service.
+ value: '5432'
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for the application.
+ value: '0'
+- name: APPLICATION_ADMIN_PASSWORD
+ displayName: Application Admin Password
+ required: true
+ description: Admin password that will be set on the application.
+ value: smartvm
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+ description: The database to be used by the Ansible container.
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible admin User password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Frontend Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service; if left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_DBUS_API_SERVICE_NAME
+ required: true
+ displayName: Apache httpd DBus API Service Name
+ description: The name of the httpd dbus api service.
+ value: httpd-dbus-api
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml
index 3bc6c5813..5c757b6c2 100644
--- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/cfme-template.yaml
@@ -5,17 +5,308 @@ labels:
metadata:
name: cloudforms
annotations:
- description: "CloudForms appliance with persistent storage"
- tags: "instant-app,cloudforms,cfme"
- iconClass: "icon-rails"
+ description: CloudForms appliance with persistent storage
+ tags: instant-app,cloudforms,cfme
+ iconClass: icon-rails
objects:
- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ admin-password: "${APPLICATION_ADMIN_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ data:
+ 01_miq_overrides.conf: |
+ #------------------------------------------------------------------------------
+ # CONNECTIONS AND AUTHENTICATION
+ #------------------------------------------------------------------------------
+
+ tcp_keepalives_count = 9
+ tcp_keepalives_idle = 3
+ tcp_keepalives_interval = 75
+
+ #------------------------------------------------------------------------------
+ # RESOURCE USAGE (except WAL)
+ #------------------------------------------------------------------------------
+
+ shared_preload_libraries = 'pglogical,repmgr_funcs'
+ max_worker_processes = 10
+
+ #------------------------------------------------------------------------------
+ # WRITE AHEAD LOG
+ #------------------------------------------------------------------------------
+
+ wal_level = 'logical'
+ wal_log_hints = on
+ wal_buffers = 16MB
+ checkpoint_completion_target = 0.9
+
+ #------------------------------------------------------------------------------
+ # REPLICATION
+ #------------------------------------------------------------------------------
+
+ max_wal_senders = 10
+ wal_sender_timeout = 0
+ max_replication_slots = 10
+ hot_standby = on
+
+ #------------------------------------------------------------------------------
+ # ERROR REPORTING AND LOGGING
+ #------------------------------------------------------------------------------
+
+ log_filename = 'postgresql.log'
+ log_rotation_age = 0
+ log_min_duration_statement = 5000
+ log_connections = on
+ log_disconnections = on
+ log_line_prefix = '%t:%r:%c:%u@%d:[%p]:'
+ log_lock_waits = on
+
+ #------------------------------------------------------------------------------
+ # AUTOVACUUM PARAMETERS
+ #------------------------------------------------------------------------------
+
+ log_autovacuum_min_duration = 0
+ autovacuum_naptime = 5min
+ autovacuum_vacuum_threshold = 500
+ autovacuum_analyze_threshold = 500
+ autovacuum_vacuum_scale_factor = 0.05
+
+ #------------------------------------------------------------------------------
+ # LOCK MANAGEMENT
+ #------------------------------------------------------------------------------
+
+ deadlock_timeout = 5s
+
+ #------------------------------------------------------------------------------
+ # VERSION/PLATFORM COMPATIBILITY
+ #------------------------------------------------------------------------------
+
+ escape_string_warning = off
+ standard_conforming_strings = off
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP
+ ServerName https://%{REQUEST_HOST}
+
+ ProxyPreserveHost on
+
+ RewriteCond %{REQUEST_URI} ^/ws [NC]
+ RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC]
+ RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC]
+ RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L]
+
+ # For httpd, some ErrorDocuments must be served by the httpd pod
+ RewriteCond %{REQUEST_URI} !^/proxy_pages
+
+ # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod
+ RewriteCond %{REQUEST_URI} !^/saml2
+ RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L]
+ ProxyPassReverse / http://${NAME}/
+
+ # Ensures httpd stdout/stderr are seen by docker logs.
+ ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log"
+ CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common
+ </VirtualHost>
+ authentication.conf: |
+ # Load appropriate authentication configuration files
+ #
+ Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth"
+ configuration-internal-auth: |
+ # Internal authentication
+ #
+ configuration-external-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/http.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-active-directory-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/krb5.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-saml-auth: |
+ LoadModule auth_mellon_module modules/mod_auth_mellon.so
+
+ <Location />
+ MellonEnable "info"
+
+ MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml"
+
+ MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key"
+ MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert"
+ MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml"
+
+ MellonVariable "sp-cookie"
+ MellonSecureCookie On
+ MellonCookiePath "/"
+
+ MellonIdP "IDP"
+
+ MellonEndpointPath "/saml2"
+
+ MellonUser username
+ MellonMergeEnvVars On
+
+ MellonSetEnvNoPrefix "REMOTE_USER" username
+ MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email
+ MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname
+ MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname
+ MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname
+ MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups
+ </Location>
+
+ <Location /saml_login>
+ AuthType "Mellon"
+ MellonEnable "auth"
+ Require valid-user
+ </Location>
+
+ Include "conf.d/external-auth-remote-user-conf"
+ external-auth-load-modules-conf: |
+ LoadModule authnz_pam_module modules/mod_authnz_pam.so
+ LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so
+ LoadModule lookup_identity_module modules/mod_lookup_identity.so
+ LoadModule auth_kerb_module modules/mod_auth_kerb.so
+ external-auth-login-form-conf: |
+ <Location /dashboard/external_authenticate>
+ InterceptFormPAMService httpd-auth
+ InterceptFormLogin user_name
+ InterceptFormPassword user_password
+ InterceptFormLoginSkip admin
+ InterceptFormClearRemoteUserForSkipped on
+ </Location>
+ external-auth-application-api-conf: |
+ <LocationMatch ^/api>
+ SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in
+ SetEnvIf X-Auth-Token '^.+$' let_api_token_in
+ SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in
+
+ AuthType Basic
+ AuthName "External Authentication (httpd) for API"
+ AuthBasicProvider PAM
+
+ AuthPAMService httpd-auth
+ Require valid-user
+ Order Allow,Deny
+ Allow from env=let_admin_in
+ Allow from env=let_api_token_in
+ Allow from env=let_sys_token_in
+ Satisfy Any
+ </LocationMatch>
+ external-auth-lookup-user-details-conf: |
+ <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api>
+ LookupUserAttr mail REMOTE_USER_EMAIL
+ LookupUserAttr givenname REMOTE_USER_FIRSTNAME
+ LookupUserAttr sn REMOTE_USER_LASTNAME
+ LookupUserAttr displayname REMOTE_USER_FULLNAME
+ LookupUserAttr domainname REMOTE_USER_DOMAIN
+
+ LookupUserGroups REMOTE_USER_GROUPS ":"
+ LookupDbusTimeout 5000
+ </LocationMatch>
+ external-auth-remote-user-conf: |
+ RequestHeader unset X_REMOTE_USER
+
+ RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER
+ RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR
+ RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL
+ RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME
+ RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME
+ RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME
+ RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS
+ RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-kerberos-realms: undefined
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
kind: Service
metadata:
annotations:
- description: "Exposes and load balances CloudForms pods"
+ description: Exposes and load balances CloudForms pods
service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
- name: ${NAME}
+ name: "${NAME}"
spec:
clusterIP: None
ports:
@@ -23,141 +314,97 @@ objects:
port: 80
protocol: TCP
targetPort: 80
- - name: https
- port: 443
- protocol: TCP
- targetPort: 443
selector:
- name: ${NAME}
+ name: "${NAME}"
- apiVersion: v1
kind: Route
metadata:
- name: ${NAME}
+ name: "${HTTPD_SERVICE_NAME}"
spec:
- host: ${APPLICATION_DOMAIN}
+ host: "${APPLICATION_DOMAIN}"
port:
- targetPort: https
+ targetPort: http
tls:
- termination: passthrough
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
to:
kind: Service
- name: ${NAME}
-- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-app
- annotations:
- description: "Keeps track of changes in the CloudForms app image"
- spec:
- dockerImageRepository: "${APPLICATION_IMG_NAME}"
-- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-postgresql
- annotations:
- description: "Keeps track of changes in the CloudForms postgresql image"
- spec:
- dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
-- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-memcached
- annotations:
- description: "Keeps track of changes in the CloudForms memcached image"
- spec:
- dockerImageRepository: "${MEMCACHED_IMG_NAME}"
+ name: "${HTTPD_SERVICE_NAME}"
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: "${NAME}-${DATABASE_SERVICE_NAME}"
spec:
accessModes:
- - ReadWriteOnce
+ - ReadWriteOnce
resources:
requests:
- storage: ${DATABASE_VOLUME_CAPACITY}
-- apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: "${NAME}-region"
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
+ storage: "${DATABASE_VOLUME_CAPACITY}"
- apiVersion: apps/v1beta1
- kind: "StatefulSet"
+ kind: StatefulSet
metadata:
- name: ${NAME}
+ name: "${NAME}"
annotations:
- description: "Defines how to deploy the CloudForms appliance"
+ description: Defines how to deploy the CloudForms appliance
spec:
serviceName: "${NAME}"
- replicas: 1
+ replicas: "${APPLICATION_REPLICA_COUNT}"
template:
metadata:
labels:
- name: ${NAME}
- name: ${NAME}
+ name: "${NAME}"
+ name: "${NAME}"
spec:
containers:
- name: cloudforms
- image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
+ image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
livenessProbe:
- tcpSocket:
- port: 443
+ exec:
+ command:
+ - pidof
+ - MIQ Server
initialDelaySeconds: 480
timeoutSeconds: 3
readinessProbe:
- httpGet:
- path: /
- port: 443
- scheme: HTTPS
+ tcpSocket:
+ port: 80
initialDelaySeconds: 200
timeoutSeconds: 3
ports:
- containerPort: 80
protocol: TCP
- - containerPort: 443
- protocol: TCP
- securityContext:
- privileged: true
volumeMounts:
- -
- name: "${NAME}-server"
- mountPath: "/persistent"
- -
- name: "${NAME}-region"
- mountPath: "/persistent-region"
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
env:
- -
- name: "APPLICATION_INIT_DELAY"
- value: "${APPLICATION_INIT_DELAY}"
- -
- name: "DATABASE_SERVICE_NAME"
- value: "${DATABASE_SERVICE_NAME}"
- -
- name: "DATABASE_REGION"
- value: "${DATABASE_REGION}"
- -
- name: "MEMCACHED_SERVICE_NAME"
- value: "${MEMCACHED_SERVICE_NAME}"
- -
- name: "POSTGRESQL_USER"
- value: "${DATABASE_USER}"
- -
- name: "POSTGRESQL_PASSWORD"
- value: "${DATABASE_PASSWORD}"
- -
- name: "POSTGRESQL_DATABASE"
- value: "${DATABASE_NAME}"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- value: "${POSTGRESQL_MAX_CONNECTIONS}"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- value: "${POSTGRESQL_SHARED_BUFFERS}"
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: APPLICATION_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: admin-password
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
resources:
requests:
memory: "${APPLICATION_MEM_REQ}"
@@ -168,59 +415,128 @@ objects:
preStop:
exec:
command:
- - /opt/rh/cfme-container-scripts/sync-pv-data
- volumes:
- -
- name: "${NAME}-region"
- persistentVolumeClaim:
- claimName: ${NAME}-region
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
volumeClaimTemplates:
- - metadata:
- name: "${NAME}-server"
- annotations:
- # Uncomment this if using dynamic volume provisioning.
- # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
- # volume.alpha.kubernetes.io/storage-class: anything
- spec:
- accessModes: [ ReadWriteOnce ]
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for CloudForms backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
resources:
requests:
- storage: "${APPLICATION_VOLUME_CAPACITY}"
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
- apiVersion: v1
- kind: "Service"
+ kind: Service
metadata:
name: "${MEMCACHED_SERVICE_NAME}"
annotations:
- description: "Exposes the memcached server"
+ description: Exposes the memcached server
spec:
ports:
- -
- name: "memcached"
- port: 11211
- targetPort: 11211
+ - name: memcached
+ port: 11211
+ targetPort: 11211
selector:
name: "${MEMCACHED_SERVICE_NAME}"
- apiVersion: v1
- kind: "DeploymentConfig"
+ kind: DeploymentConfig
metadata:
name: "${MEMCACHED_SERVICE_NAME}"
annotations:
- description: "Defines how to deploy memcached"
+ description: Defines how to deploy memcached
spec:
strategy:
- type: "Recreate"
+ type: Recreate
triggers:
- -
- type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "memcached"
- from:
- kind: "ImageStreamTag"
- name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
- -
- type: "ConfigChange"
+ - type: ConfigChange
replicas: 1
selector:
name: "${MEMCACHED_SERVICE_NAME}"
@@ -232,74 +548,58 @@ objects:
spec:
volumes: []
containers:
- -
- name: "memcached"
- image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
- ports:
- -
- containerPort: 11211
- readinessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 5
- tcpSocket:
- port: 11211
- livenessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 30
- tcpSocket:
- port: 11211
- volumeMounts: []
- env:
- -
- name: "MEMCACHED_MAX_MEMORY"
- value: "${MEMCACHED_MAX_MEMORY}"
- -
- name: "MEMCACHED_MAX_CONNECTIONS"
- value: "${MEMCACHED_MAX_CONNECTIONS}"
- -
- name: "MEMCACHED_SLAB_PAGE_SIZE"
- value: "${MEMCACHED_SLAB_PAGE_SIZE}"
- resources:
- requests:
- memory: "${MEMCACHED_MEM_REQ}"
- cpu: "${MEMCACHED_CPU_REQ}"
- limits:
- memory: "${MEMCACHED_MEM_LIMIT}"
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
- apiVersion: v1
- kind: "Service"
+ kind: Service
metadata:
name: "${DATABASE_SERVICE_NAME}"
annotations:
- description: "Exposes the database server"
+ description: Exposes the database server
spec:
ports:
- -
- name: "postgresql"
- port: 5432
- targetPort: 5432
+ - name: postgresql
+ port: 5432
+ targetPort: 5432
selector:
name: "${DATABASE_SERVICE_NAME}"
- apiVersion: v1
- kind: "DeploymentConfig"
+ kind: DeploymentConfig
metadata:
name: "${DATABASE_SERVICE_NAME}"
annotations:
- description: "Defines how to deploy the database"
+ description: Defines how to deploy the database
spec:
strategy:
- type: "Recreate"
+ type: Recreate
triggers:
- -
- type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "postgresql"
- from:
- kind: "ImageStreamTag"
- name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
- -
- type: "ConfigChange"
+ - type: ConfigChange
replicas: 1
selector:
name: "${DATABASE_SERVICE_NAME}"
@@ -310,236 +610,524 @@ objects:
name: "${DATABASE_SERVICE_NAME}"
spec:
volumes:
- -
- name: "cfme-pgdb-volume"
- persistentVolumeClaim:
- claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ - name: cfme-pgdb-volume
+ persistentVolumeClaim:
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ - name: cfme-pg-configs
+ configMap:
+ name: "${DATABASE_SERVICE_NAME}-configs"
containers:
- -
- name: "postgresql"
- image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
- ports:
- -
- containerPort: 5432
- readinessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 15
+ - name: postgresql
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
+ ports:
+ - containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ - name: cfme-pgdb-volume
+ mountPath: "/var/lib/pgsql/data"
+ - name: cfme-pg-configs
+ mountPath: "${POSTGRESQL_CONFIG_DIR}"
+ env:
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${DATABASE_NAME}"
+ - name: POSTGRESQL_MAX_CONNECTIONS
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ - name: POSTGRESQL_SHARED_BUFFERS
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ - name: POSTGRESQL_CONFIG_DIR
+ value: "${POSTGRESQL_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
+ limits:
+ memory: "${POSTGRESQL_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: cfme-privileged
+ serviceAccountName: cfme-privileged
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_DBUS_API_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server dbus api
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http-dbus-api
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 8080
+ protocol: TCP
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ - name: HTTPD_AUTH_KERBEROS_REALMS
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-kerberos-realms
+ lifecycle:
+ postStart:
exec:
command:
- - "/bin/sh"
- - "-i"
- - "-c"
- - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
- livenessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 60
- tcpSocket:
- port: 5432
- volumeMounts:
- -
- name: "cfme-pgdb-volume"
- mountPath: "/var/lib/pgsql/data"
- env:
- -
- name: "POSTGRESQL_USER"
- value: "${DATABASE_USER}"
- -
- name: "POSTGRESQL_PASSWORD"
- value: "${DATABASE_PASSWORD}"
- -
- name: "POSTGRESQL_DATABASE"
- value: "${DATABASE_NAME}"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- value: "${POSTGRESQL_MAX_CONNECTIONS}"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- value: "${POSTGRESQL_SHARED_BUFFERS}"
- resources:
- requests:
- memory: "${POSTGRESQL_MEM_REQ}"
- cpu: "${POSTGRESQL_CPU_REQ}"
- limits:
- memory: "${POSTGRESQL_MEM_LIMIT}"
-
+ - "/usr/bin/save-container-environment"
+ serviceAccount: cfme-httpd
+ serviceAccountName: cfme-httpd
parameters:
- -
- name: "NAME"
- displayName: Name
- required: true
- description: "The name assigned to all of the frontend objects defined in this template."
- value: cloudforms
- -
- name: "DATABASE_SERVICE_NAME"
- displayName: "PostgreSQL Service Name"
- required: true
- description: "The name of the OpenShift Service exposed for the PostgreSQL container."
- value: "postgresql"
- -
- name: "DATABASE_USER"
- displayName: "PostgreSQL User"
- required: true
- description: "PostgreSQL user that will access the database."
- value: "root"
- -
- name: "DATABASE_PASSWORD"
- displayName: "PostgreSQL Password"
- required: true
- description: "Password for the PostgreSQL user."
- value: "smartvm"
- -
- name: "DATABASE_NAME"
- required: true
- displayName: "PostgreSQL Database Name"
- description: "Name of the PostgreSQL database accessed."
- value: "vmdb_production"
- -
- name: "DATABASE_REGION"
- required: true
- displayName: "Application Database Region"
- description: "Database region that will be used for application."
- value: "0"
- -
- name: "MEMCACHED_SERVICE_NAME"
- required: true
- displayName: "Memcached Service Name"
- description: "The name of the OpenShift Service exposed for the Memcached container."
- value: "memcached"
- -
- name: "MEMCACHED_MAX_MEMORY"
- displayName: "Memcached Max Memory"
- description: "Memcached maximum memory for memcached object storage in MB."
- value: "64"
- -
- name: "MEMCACHED_MAX_CONNECTIONS"
- displayName: "Memcached Max Connections"
- description: "Memcached maximum number of connections allowed."
- value: "1024"
- -
- name: "MEMCACHED_SLAB_PAGE_SIZE"
- displayName: "Memcached Slab Page Size"
- description: "Memcached size of each slab page."
- value: "1m"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- displayName: "PostgreSQL Max Connections"
- description: "PostgreSQL maximum number of database connections allowed."
- value: "100"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- displayName: "PostgreSQL Shared Buffer Amount"
- description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
- value: "256MB"
- -
- name: "APPLICATION_CPU_REQ"
- displayName: "Application Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
- value: "1000m"
- -
- name: "POSTGRESQL_CPU_REQ"
- displayName: "PostgreSQL Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
- value: "500m"
- -
- name: "MEMCACHED_CPU_REQ"
- displayName: "Memcached Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
- value: "200m"
- -
- name: "APPLICATION_MEM_REQ"
- displayName: "Application Min RAM Requested"
- required: true
- description: "Minimum amount of memory the Application container will need."
- value: "6144Mi"
- -
- name: "POSTGRESQL_MEM_REQ"
- displayName: "PostgreSQL Min RAM Requested"
- required: true
- description: "Minimum amount of memory the PostgreSQL container will need."
- value: "1024Mi"
- -
- name: "MEMCACHED_MEM_REQ"
- displayName: "Memcached Min RAM Requested"
- required: true
- description: "Minimum amount of memory the Memcached container will need."
- value: "64Mi"
- -
- name: "APPLICATION_MEM_LIMIT"
- displayName: "Application Max RAM Limit"
- required: true
- description: "Maximum amount of memory the Application container can consume."
- value: "16384Mi"
- -
- name: "POSTGRESQL_MEM_LIMIT"
- displayName: "PostgreSQL Max RAM Limit"
- required: true
- description: "Maximum amount of memory the PostgreSQL container can consume."
- value: "8192Mi"
- -
- name: "MEMCACHED_MEM_LIMIT"
- displayName: "Memcached Max RAM Limit"
- required: true
- description: "Maximum amount of memory the Memcached container can consume."
- value: "256Mi"
- -
- name: "POSTGRESQL_IMG_NAME"
- displayName: "PostgreSQL Image Name"
- description: "This is the PostgreSQL image name requested to deploy."
- value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql"
- -
- name: "POSTGRESQL_IMG_TAG"
- displayName: "PostgreSQL Image Tag"
- description: "This is the PostgreSQL image tag/version requested to deploy."
- value: "latest"
- -
- name: "MEMCACHED_IMG_NAME"
- displayName: "Memcached Image Name"
- description: "This is the Memcached image name requested to deploy."
- value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached"
- -
- name: "MEMCACHED_IMG_TAG"
- displayName: "Memcached Image Tag"
- description: "This is the Memcached image tag/version requested to deploy."
- value: "latest"
- -
- name: "APPLICATION_IMG_NAME"
- displayName: "Application Image Name"
- description: "This is the Application image name requested to deploy."
- value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app"
- -
- name: "APPLICATION_IMG_TAG"
- displayName: "Application Image Tag"
- description: "This is the Application image tag/version requested to deploy."
- value: "latest"
- -
- name: "APPLICATION_DOMAIN"
- displayName: "Application Hostname"
- description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
- value: ""
- -
- name: "APPLICATION_INIT_DELAY"
- displayName: "Application Init Delay"
- required: true
- description: "Delay in seconds before we attempt to initialize the application."
- value: "15"
- -
- name: "APPLICATION_VOLUME_CAPACITY"
- displayName: "Application Volume Capacity"
- required: true
- description: "Volume space available for application data."
- value: "5Gi"
- -
- name: "APPLICATION_REGION_VOLUME_CAPACITY"
- displayName: "Application Region Volume Capacity"
- required: true
- description: "Volume space available for region application data."
- value: "5Gi"
- -
- name: "DATABASE_VOLUME_CAPACITY"
- displayName: "Database Volume Capacity"
- required: true
- description: "Volume space available for database."
- value: "15Gi"
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: cloudforms
+- name: V2_KEY
+ displayName: CloudForms Encryption Key
+ required: true
+ description: Encryption Key for CloudForms Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for application.
+ value: '0'
+- name: APPLICATION_ADMIN_PASSWORD
+ displayName: Application Admin Password
+ required: true
+ description: Admin password that will be set on the application.
+ value: smartvm
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+  description: The database to be used by the Ansible container.
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: POSTGRESQL_CONFIG_DIR
+ displayName: PostgreSQL Configuration Overrides
+ description: Directory used to store PostgreSQL configuration overrides.
+ value: "/var/lib/pgsql/conf.d"
+- name: POSTGRESQL_MAX_CONNECTIONS
+ displayName: PostgreSQL Max Connections
+ description: PostgreSQL maximum number of database connections allowed.
+ value: '1000'
+- name: POSTGRESQL_SHARED_BUFFERS
+ displayName: PostgreSQL Shared Buffer Amount
+ description: Amount of memory dedicated for PostgreSQL shared memory buffers.
+ value: 1GB
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+  displayName: Ansible Admin User Password
+  required: true
+  description: The password for the Ansible container admin user.
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+  description: Encryption key for the Ansible container.
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+  description: Username for the Ansible RabbitMQ server.
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+  description: Password for the Ansible RabbitMQ server.
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: POSTGRESQL_CPU_REQ
+ displayName: PostgreSQL Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores).
+ value: 500m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: POSTGRESQL_MEM_REQ
+ displayName: PostgreSQL Min RAM Requested
+ required: true
+ description: Minimum amount of memory the PostgreSQL container will need.
+ value: 4Gi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: POSTGRESQL_MEM_LIMIT
+ displayName: PostgreSQL Max RAM Limit
+ required: true
+ description: Maximum amount of memory the PostgreSQL container can consume.
+ value: 8Gi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: POSTGRESQL_IMG_NAME
+ displayName: PostgreSQL Image Name
+ description: This is the PostgreSQL image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql
+- name: POSTGRESQL_IMG_TAG
+ displayName: PostgreSQL Image Tag
+ description: This is the PostgreSQL image tag/version requested to deploy.
+ value: latest
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Front end Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Back end Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+  description: The exposed hostname that will route to the application service. If left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: DATABASE_VOLUME_CAPACITY
+ displayName: Database Volume Capacity
+ required: true
+ description: Volume space available for database.
+ value: 15Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_DBUS_API_SERVICE_NAME
+ required: true
+ displayName: Apache httpd DBus API Service Name
+  description: The name of the httpd DBus API service.
+ value: httpd-dbus-api
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
diff --git a/roles/openshift_gcp/files/bootstrap-script.sh b/roles/openshift_gcp/files/bootstrap-script.sh
new file mode 100644
index 000000000..0c3f1999b
--- /dev/null
+++ b/roles/openshift_gcp/files/bootstrap-script.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# This startup script bootstraps a GCP node from a config stored in the
+# project metadata. It polls until the config is available and then starts
+# the origin-node service.
+# TODO: generalize
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [[ "$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap" -H "Metadata-Flavor: Google" )" != "true" ]]; then
+ echo "info: Bootstrap is not enabled for this instance, skipping" 1>&2
+ exit 0
+fi
+
+if ! id=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-id" -H "Metadata-Flavor: Google" ); then
+ echo "error: Unable to get cluster-id for instance from cluster metadata" 1>&2
+ exit 1
+fi
+
+if ! node_group=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/node-group" -H "Metadata-Flavor: Google" ); then
+ echo "error: Unable to get node-group for instance from cluster metadata" 1>&2
+ exit 1
+fi
+
+if ! config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then
+ while true; do
+ if config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/project/attributes/${id}-bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then
+ break
+ fi
+ echo "info: waiting for ${id}-bootstrap-config to become available in cluster metadata ..." 1>&2
+ sleep 5
+ done
+fi
+
+echo "Got bootstrap config from metadata"
+mkdir -p /etc/origin/node
+echo -n "${config}" > /etc/origin/node/bootstrap.kubeconfig
+echo "BOOTSTRAP_CONFIG_NAME=node-config-${node_group}" >> /etc/sysconfig/origin-node
+systemctl enable origin-node
+systemctl start origin-node
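
The script consumes three instance attributes plus an optional project-level fallback. For troubleshooting, the same endpoints can be queried by hand on the node; a sketch using the attribute names from the curls above:

    # Query the same metadata endpoints the bootstrap script uses.
    MD=http://metadata.google.internal/computeMetadata/v1
    curl -s -H "Metadata-Flavor: Google" "$MD/instance/attributes/bootstrap"; echo
    curl -s -H "Metadata-Flavor: Google" "$MD/instance/attributes/cluster-id"; echo
    curl -s -H "Metadata-Flavor: Google" "$MD/instance/attributes/node-group"; echo
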
diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.service b/roles/openshift_gcp/files/openshift-bootstrap-update.service
new file mode 100644
index 000000000..c65b1b34e
--- /dev/null
+++ b/roles/openshift_gcp/files/openshift-bootstrap-update.service
@@ -0,0 +1,7 @@
+[Unit]
+Description=Update the OpenShift node bootstrap configuration
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/openshift-bootstrap-update
+User=root
diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.timer b/roles/openshift_gcp/files/openshift-bootstrap-update.timer
new file mode 100644
index 000000000..1a517b33e
--- /dev/null
+++ b/roles/openshift_gcp/files/openshift-bootstrap-update.timer
@@ -0,0 +1,10 @@
+[Unit]
+Description=Update the OpenShift node bootstrap credentials hourly
+
+[Timer]
+OnBootSec=30s
+OnCalendar=hourly
+Persistent=true
+
+[Install]
+WantedBy=timers.target \ No newline at end of file
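
The timer/service pair is installed by configure_master_bootstrap.yml further down; if wiring it up manually on a systemd host, something along these lines should work (a sketch, not taken from the role):

    systemctl daemon-reload
    systemctl enable openshift-bootstrap-update.timer
    systemctl start openshift-bootstrap-update.timer
    systemctl list-timers openshift-bootstrap-update.timer   # confirm the next run is scheduled
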
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp/files/partition.conf
index b87e5e0b6..76e65ab9c 100644
--- a/roles/openshift_gcp_image_prep/files/partition.conf
+++ b/roles/openshift_gcp/files/partition.conf
@@ -1,3 +1,3 @@
[Service]
ExecStartPost=-/usr/bin/growpart /dev/sda 1
-ExecStartPost=-/sbin/xfs_growfs /
+ExecStartPost=-/sbin/xfs_growfs / \ No newline at end of file
diff --git a/roles/openshift_gcp/meta/main.yml b/roles/openshift_gcp/meta/main.yml
new file mode 100644
index 000000000..5e428f8de
--- /dev/null
+++ b/roles/openshift_gcp/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Clayton Coleman
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: lib_utils
+- role: lib_openshift
diff --git a/roles/openshift_gcp/tasks/add_custom_repositories.yml b/roles/openshift_gcp/tasks/add_custom_repositories.yml
new file mode 100644
index 000000000..04718f78e
--- /dev/null
+++ b/roles/openshift_gcp/tasks/add_custom_repositories.yml
@@ -0,0 +1,20 @@
+---
+- name: Copy custom repository client certificates
+ copy:
+ src: "{{ files_dir }}/{{ item.1.sslclientcert }}"
+ dest: /var/lib/yum/custom_secret_{{ item.0 }}_cert
+ when: item.1.sslclientcert | default(false)
+ with_indexed_items: "{{ provision_custom_repositories }}"
+- name: Copy custom repository client keys
+ copy:
+ src: "{{ files_dir }}/{{ item.1.sslclientkey }}"
+ dest: /var/lib/yum/custom_secret_{{ item.0 }}_key
+ when: item.1.sslclientkey | default(false)
+ with_indexed_items: "{{ provision_custom_repositories }}"
+
+- name: Create any custom repos that are defined
+ template:
+ src: yum_repo.j2
+ dest: /etc/yum.repos.d/provision_custom_repositories.repo
+ when: provision_custom_repositories | length > 0
+ notify: refresh cache
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
index fee5ab618..2c6e2790a 100644
--- a/roles/openshift_gcp_image_prep/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
@@ -1,18 +1,10 @@
----
# GCE instances are starting with xfs AND barrier=1, which is only for extfs.
+---
- name: Remove barrier=1 from XFS fstab entries
- lineinfile:
- path: /etc/fstab
- regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
- line: '\1xfs\2 \4'
- backrefs: yes
+ command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab
- name: Ensure the root filesystem has XFS group quota turned on
- lineinfile:
- path: /boot/grub2/grub.cfg
- regexp: '^(.*)linux16 (.*)$'
- line: '\1linux16 \2 rootflags=gquota'
- backrefs: yes
+ command: sed -i -e 's/linux16 \(.*\)$/linux16 \1 rootflags=gquota/g' /boot/grub2/grub.cfg
- name: Ensure the root partition grows on startup
copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
diff --git a/roles/openshift_gcp/tasks/configure_master_bootstrap.yml b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
new file mode 100644
index 000000000..591cb593c
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
@@ -0,0 +1,36 @@
+#
+# These tasks configure the instance to periodically push the latest bootstrap
+# kubeconfig into the project metadata, keeping that metadata in sync with the
+# cluster's configuration. We then approve CSRs for any nodes that are waiting
+# to join the cluster.
+#
+---
+- name: Copy bootstrap update timer unit
+ copy:
+ src: openshift-bootstrap-update.timer
+ dest: /etc/systemd/system/openshift-bootstrap-update.timer
+ owner: root
+ group: root
+ mode: 0664
+
+- name: Copy bootstrap update service unit
+ copy:
+ src: openshift-bootstrap-update.service
+ dest: /etc/systemd/system/openshift-bootstrap-update.service
+ owner: root
+ group: root
+ mode: 0664
+
+- name: Create bootstrap update script
+ template: src=openshift-bootstrap-update.j2 dest=/usr/bin/openshift-bootstrap-update mode=u+rx
+
+- name: Start bootstrap update timer
+ systemd:
+ name: "openshift-bootstrap-update.timer"
+ state: started
+
+- name: Bootstrap all nodes that were identified with bootstrap metadata
+ run_once: true
+ oc_adm_csr:
+ nodes: "{{ groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list }}"
+ timeout: 60
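
The oc_adm_csr task automates certificate approval for the bootstrap nodes. When a node times out, the manual equivalent is roughly the following (the CSR name is illustrative and comes from the list command):

    # List pending CSRs, then approve the one belonging to the stuck node.
    oc get csr
    oc adm certificate approve csr-abc12
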
diff --git a/roles/openshift_gcp/tasks/configure_master_healthcheck.yml b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
new file mode 100644
index 000000000..aa9655977
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
@@ -0,0 +1,19 @@
+---
+- name: refresh yum cache
+ command: yum clean all
+ args:
+ warn: no
+ when: ansible_os_family == "RedHat"
+
+- name: install haproxy
+ package: name=haproxy state=present
+ register: result
+ until: '"failed" not in result'
+ retries: 10
+ delay: 10
+
+- name: configure haproxy
+ template: src=master_healthcheck.j2 dest=/etc/haproxy/haproxy.cfg
+
+- name: start and enable haproxy service
+ service: name=haproxy state=started enabled=yes
diff --git a/roles/openshift_gcp/tasks/dynamic_inventory.yml b/roles/openshift_gcp/tasks/dynamic_inventory.yml
new file mode 100644
index 000000000..1637da945
--- /dev/null
+++ b/roles/openshift_gcp/tasks/dynamic_inventory.yml
@@ -0,0 +1,5 @@
+---
+- name: Extract PEM from service account file
+ copy: content="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}" dest=/tmp/gce.pem mode=0600
+- name: Templatize environment script
+ template: src=inventory.j2.sh dest=/tmp/inventory.sh mode=u+rx
diff --git a/roles/openshift_gcp/tasks/frequent_log_rotation.yml b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
new file mode 100644
index 000000000..0b4b27f84
--- /dev/null
+++ b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
@@ -0,0 +1,18 @@
+---
+- name: Rotate logs daily
+ replace:
+ dest: /etc/logrotate.conf
+    regexp: '^(weekly|monthly|yearly)$'
+ replace: daily
+- name: Rotate at a smaller size of log
+ lineinfile:
+ dest: /etc/logrotate.conf
+ state: present
+ regexp: '^size'
+ line: size 10M
+- name: Limit total size of log files
+ lineinfile:
+ dest: /etc/logrotate.conf
+ state: present
+ regexp: '^maxsize'
+ line: maxsize 20M
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yml
index ad205ba33..fb147bc78 100644
--- a/roles/openshift_gcp/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/main.yml
@@ -17,7 +17,7 @@
- name: Provision GCP DNS domain
command: /tmp/openshift_gcp_provision_dns.sh
args:
- chdir: "{{ playbook_dir }}/files"
+ chdir: "{{ files_dir }}"
register: dns_provision
when:
- state | default('present') == 'present'
@@ -33,7 +33,7 @@
- name: Provision GCP resources
command: /tmp/openshift_gcp_provision.sh
args:
- chdir: "{{ playbook_dir }}/files"
+ chdir: "{{ files_dir }}"
when:
- state | default('present') == 'present'
diff --git a/roles/openshift_gcp/tasks/node_cloud_config.yml b/roles/openshift_gcp/tasks/node_cloud_config.yml
new file mode 100644
index 000000000..4e982f497
--- /dev/null
+++ b/roles/openshift_gcp/tasks/node_cloud_config.yml
@@ -0,0 +1,12 @@
+---
+- name: ensure the /etc/origin folder exists
+ file: name=/etc/origin state=directory
+
+- name: configure gce cloud config options
+ ini_file: dest=/etc/origin/cloudprovider/gce.conf section=Global option={{ item.key }} value={{ item.value }} state=present create=yes
+ with_items:
+ - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+ - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+ - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+ - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+ - { key: 'multizone', value: 'false' }
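
With the options above substituted, the rendered cloud-provider config should look roughly like the comment below (project, network, and prefix values are invented for illustration):

    # Expected shape of the generated file:
    #   [Global]
    #   project-id = my-gcp-project
    #   network-name = default
    #   node-tags = ocp-ocp
    #   node-instance-prefix = ocp-
    #   multizone = false
    cat /etc/origin/cloudprovider/gce.conf
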
diff --git a/roles/openshift_gcp/tasks/publish_image.yml b/roles/openshift_gcp/tasks/publish_image.yml
new file mode 100644
index 000000000..db8a7ca69
--- /dev/null
+++ b/roles/openshift_gcp/tasks/publish_image.yml
@@ -0,0 +1,32 @@
+---
+- name: Require openshift_gcp_image
+ fail:
+ msg: "A source image name or family is required for image publishing. Please ensure `openshift_gcp_image` is defined."
+ when: openshift_gcp_image is undefined
+
+- name: Require openshift_gcp_target_image
+ fail:
+ msg: "A target image name or family is required for image publishing. Please ensure `openshift_gcp_target_image` is defined."
+ when: openshift_gcp_target_image is undefined
+
+- block:
+ - name: Retrieve images in the {{ openshift_gcp_target_image }} family
+ command: >
+ gcloud --project "{{ openshift_gcp_project }}" compute images list
+ "--filter=family={{ openshift_gcp_target_image }}"
+ --format=json --sort-by ~creationTimestamp
+ register: images
+ - name: Prune oldest images
+ command: >
+ gcloud --project "{{ openshift_gcp_project }}" compute images delete "{{ item['name'] }}"
+ with_items: "{{ (images.stdout | default('[]') | from_json )[( openshift_gcp_keep_images | int ):] }}"
+ when: openshift_gcp_keep_images is defined
+
+- name: Copy the latest image in the family {{ openshift_gcp_image }} to {{ openshift_gcp_target_image }}
+ command: >
+ gcloud --project "{{ openshift_gcp_target_project | default(openshift_gcp_project) }}"
+ beta compute images create
+ "{{ openshift_gcp_target_image_name | default(openshift_gcp_target_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}"
+ --family "{{ openshift_gcp_target_image }}"
+ --source-image-family "{{ openshift_gcp_image }}"
+ --source-image-project "{{ openshift_gcp_project }}"
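
The two gcloud invocations above expand to something like the commands below; project, family, and image names are placeholders, and the prune step only runs when openshift_gcp_keep_images is set:

    # List the images in the target family (newest first) and delete the oldest ones.
    gcloud --project my-project compute images list \
        --filter=family=origin-images --format=json --sort-by ~creationTimestamp
    gcloud --project my-project compute images delete origin-images-20180101-000000
    # Copy the newest image of the source family into the target family.
    gcloud --project my-project beta compute images create \
        origin-images-20180124-120000 --family origin-images \
        --source-image-family origin-base --source-image-project my-project
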
diff --git a/roles/openshift_gcp/tasks/setup_scale_group_facts.yml b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml
new file mode 100644
index 000000000..0fda43123
--- /dev/null
+++ b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml
@@ -0,0 +1,44 @@
+---
+- name: Add masters to requisite groups
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: masters, etcd
+ with_items: "{{ groups['tag_ocp-master'] }}"
+
+- name: Add a master to the primary masters group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: primary_master
+ with_items: "{{ groups['tag_ocp-master'].0 }}"
+
+- name: Add non-bootstrapping master node instances to node group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: nodes
+ openshift_node_labels:
+ role: infra
+ with_items: "{{ groups['tag_ocp-master'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add infra node instances to node group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: nodes
+ openshift_node_labels:
+ role: infra
+ with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add node instances to node group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: nodes
+ openshift_node_labels:
+ role: app
+ with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add bootstrap node instances
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: bootstrap_nodes
+ openshift_node_bootstrap: True
+ with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}"
+ when: not (openshift_node_bootstrap | default(False))
diff --git a/roles/openshift_gcp/templates/inventory.j2.sh b/roles/openshift_gcp/templates/inventory.j2.sh
new file mode 100644
index 000000000..dcaffb578
--- /dev/null
+++ b/roles/openshift_gcp/templates/inventory.j2.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+export GCE_PROJECT="{{ openshift_gcp_project }}"
+export GCE_ZONE="{{ openshift_gcp_zone }}"
+export GCE_EMAIL="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+export GCE_PEM_FILE_PATH="/tmp/gce.pem"
+export INVENTORY_IP_TYPE="{{ inventory_ip_type }}"
+export GCE_TAGGED_INSTANCES="{{ openshift_gcp_prefix }}ocp" \ No newline at end of file
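
The rendered script is meant to be sourced before invoking the GCE dynamic inventory; the hosts.sh/hosts.py pair under inventory/dynamic/gcp appears to consume these variables. A sketch of typical usage:

    # Export the GCE credentials/settings, then ask the dynamic inventory for hosts.
    source /tmp/inventory.sh
    ansible-inventory -i inventory/dynamic/gcp/hosts.sh --list | head
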
diff --git a/roles/openshift_gcp/templates/master_healthcheck.j2 b/roles/openshift_gcp/templates/master_healthcheck.j2
new file mode 100644
index 000000000..189e578c5
--- /dev/null
+++ b/roles/openshift_gcp/templates/master_healthcheck.j2
@@ -0,0 +1,68 @@
+#---------------------------------------------------------------------
+# Example configuration for a possible web application. See the
+# full configuration options online.
+#
+# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
+#
+#---------------------------------------------------------------------
+
+#---------------------------------------------------------------------
+# Global settings
+#---------------------------------------------------------------------
+global
+ # to have these messages end up in /var/log/haproxy.log you will
+ # need to:
+ #
+ # 1) configure syslog to accept network log events. This is done
+ # by adding the '-r' option to the SYSLOGD_OPTIONS in
+ # /etc/sysconfig/syslog
+ #
+ # 2) configure local2 events to go to the /var/log/haproxy.log
+ # file. A line like the following can be added to
+ # /etc/sysconfig/syslog
+ #
+ # local2.* /var/log/haproxy.log
+ #
+ log 127.0.0.1 local2
+
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ user haproxy
+ group haproxy
+ daemon
+
+ # turn on stats unix socket
+ stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option forwardfor except 127.0.0.0/8
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+
+#---------------------------------------------------------------------
+# main frontend which proxys to the backends
+#---------------------------------------------------------------------
+frontend http-proxy *:8080
+ acl url_healthz path_beg -i /healthz
+ use_backend ocp if url_healthz
+
+backend ocp
+ server ocp localhost:{{ internal_console_port }} ssl verify none
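
The frontend/backend pair exposes the master's /healthz over plain HTTP on port 8080 so GCP health checks do not need TLS. Once haproxy is running, it can be exercised with curl (hostname is illustrative):

    # Should return "ok" when the master API behind internal_console_port is healthy.
    curl -s http://master-1.example.com:8080/healthz; echo
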
diff --git a/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2
new file mode 100644
index 000000000..5b0563724
--- /dev/null
+++ b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -euo pipefail
+
+oc serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig
+gcloud compute project-info --project '{{ openshift_gcp_project }}' add-metadata --metadata-from-file '{{ openshift_gcp_prefix + openshift_gcp_clusterid | default("default") }}-bootstrap-config=/root/bootstrap.kubeconfig'
+rm -f /root/bootstrap.kubeconfig
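
The script writes the kubeconfig under a project-metadata key named <prefix><clusterid>-bootstrap-config, which is what bootstrap-script.sh polls for. To confirm the hourly timer is refreshing it, something like this should do (project name is a placeholder):

    # Look for the *-bootstrap-config key in the project metadata.
    gcloud compute project-info describe --project my-project --format=json \
        | grep -o '"[^"]*-bootstrap-config"'
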
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index 4d150bc74..794985322 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -9,15 +9,26 @@ if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then
ssh-add "{{ openshift_gcp_ssh_private_key }}" || true
fi
- # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
- pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
+ # Check if the public key is in the project metadata, and if not, add it there
+ if [ -f "{{ openshift_gcp_ssh_private_key }}.pub" ]; then
+ pub_file="{{ openshift_gcp_ssh_private_key }}.pub"
+ pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
+ else
+ keyfile="${HOME}/.ssh/google_compute_engine"
+ pub_file="${keyfile}.pub"
+ mkdir -p "${HOME}/.ssh"
+ cp "{{ openshift_gcp_ssh_private_key }}" "${keyfile}"
+ chmod 0600 "${keyfile}"
+ ssh-keygen -y -f "${keyfile}" > "${pub_file}"
+ pub_key=$(cut -d ' ' -f 2 < "${pub_file}")
+ fi
key_tmp_file='/tmp/ocp-gce-keys'
if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then
if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then
gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
fi
echo -n 'cloud-user:' >> "$key_tmp_file"
- cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file"
+ cat "${pub_file}" >> "$key_tmp_file"
gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
rm -f "$key_tmp_file"
fi
diff --git a/roles/openshift_gcp/templates/yum_repo.j2 b/roles/openshift_gcp/templates/yum_repo.j2
new file mode 100644
index 000000000..77919ea75
--- /dev/null
+++ b/roles/openshift_gcp/templates/yum_repo.j2
@@ -0,0 +1,20 @@
+{% for repo in provision_custom_repositories %}
+[{{ repo.id | default(repo.name) }}]
+name={{ repo.name | default(repo.id) }}
+baseurl={{ repo.baseurl }}
+{% set enable_repo = repo.enabled | default(1) %}
+enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
+{% set enable_gpg_check = repo.gpgcheck | default(1) %}
+gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
+{% if 'sslclientcert' in repo %}
+sslclientcert={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_cert" if repo.sslclientcert }}
+{% endif %}
+{% if 'sslclientkey' in repo %}
+sslclientkey={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_key" if repo.sslclientkey }}
+{% endif %}
+{% for key, value in repo.iteritems() %}
+{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck', 'sslclientkey', 'sslclientcert'] and value is defined %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+{% endfor %}
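
Given a provision_custom_repositories entry with id, name, baseurl, and gpgcheck set, the template renders a conventional repo file; the comment in the sketch below shows the expected shape (ids and URL are invented), followed by a quick verification:

    # For an entry {id: origin-local, name: Origin local builds,
    #               baseurl: https://example.com/repos/origin, gpgcheck: 0}
    # the rendered /etc/yum.repos.d/provision_custom_repositories.repo is:
    #   [origin-local]
    #   name=Origin local builds
    #   baseurl=https://example.com/repos/origin
    #   enabled=1
    #   gpgcheck=0
    yum -v repolist origin-local
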
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index 22294e3d4..bc4d81eb7 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -43,7 +43,7 @@
- name: Update registry environment variables when pushing via dns
set_fact:
- openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}"
+ openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'REGISTRY_OPENSHIFT_SERVER_ADDR':'docker-registry.default.svc:5000'}) }}"
when: openshift_push_via_dns | bool
- name: Update registry proxy settings for dc/docker-registry
diff --git a/roles/openshift_hosted/tasks/storage/registry_config.j2 b/roles/openshift_hosted/tasks/storage/registry_config.j2
deleted file mode 120000
index f3e82ad4f..000000000
--- a/roles/openshift_hosted/tasks/storage/registry_config.j2
+++ /dev/null
@@ -1 +0,0 @@
-../../../templates/registry_config.j2 \ No newline at end of file
diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
index de5a8d7c2..823f012af 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
@@ -38,7 +38,8 @@ defaults
timeout check 10s
maxconn {{ openshift_loadbalancer_default_maxconn | default(20000) }}
-listen stats :9000
+listen stats
+ bind :9000
mode http
stats enable
stats uri /
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml
index c3bc1d20c..48d1d4e26 100644
--- a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml
@@ -9,7 +9,7 @@ spec:
spec:
containers:
- name: postgresql
- image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest
+ image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest
command:
- "/opt/rh/cfme-container-scripts/backup_db"
env:
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml
index 8b23f8a33..7fd4fc2e1 100644
--- a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml
@@ -9,7 +9,7 @@ spec:
spec:
containers:
- name: postgresql
- image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest
+ image: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql:latest
command:
- "/opt/rh/cfme-container-scripts/restore_db"
env:
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml
index 4a04f3372..9866c29c3 100644
--- a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml
@@ -31,6 +31,7 @@ objects:
name: "${NAME}-secrets"
stringData:
pg-password: "${DATABASE_PASSWORD}"
+ admin-password: "${APPLICATION_ADMIN_PASSWORD}"
database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
v2-key: "${V2_KEY}"
- apiVersion: v1
@@ -90,15 +91,15 @@ objects:
- name: cloudforms
image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
livenessProbe:
- tcpSocket:
- port: 80
+ exec:
+ command:
+ - pidof
+ - MIQ Server
initialDelaySeconds: 480
timeoutSeconds: 3
readinessProbe:
- httpGet:
- path: "/"
+ tcpSocket:
port: 80
- scheme: HTTP
initialDelaySeconds: 200
timeoutSeconds: 3
ports:
@@ -126,6 +127,11 @@ objects:
secretKeyRef:
name: "${NAME}-secrets"
key: v2-key
+ - name: APPLICATION_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: admin-password
- name: ANSIBLE_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
@@ -433,18 +439,173 @@ objects:
<VirtualHost *:80>
KeepAlive on
+ # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP
+ ServerName https://%{REQUEST_HOST}
+
ProxyPreserveHost on
- ProxyPass /ws/ ws://${NAME}/ws/
- ProxyPassReverse /ws/ ws://${NAME}/ws/
- ProxyPass / http://${NAME}/
+
+ RewriteCond %{REQUEST_URI} ^/ws [NC]
+ RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC]
+ RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC]
+ RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L]
+
+        # For httpd, some ErrorDocuments must be served by the httpd pod
+ RewriteCond %{REQUEST_URI} !^/proxy_pages
+
+ # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod
+ RewriteCond %{REQUEST_URI} !^/saml2
+ RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L]
ProxyPassReverse / http://${NAME}/
+
+ # Ensures httpd stdout/stderr are seen by docker logs.
+ ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log"
+ CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common
</VirtualHost>
+ authentication.conf: |
+ # Load appropriate authentication configuration files
+ #
+ Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth"
+ configuration-internal-auth: |
+ # Internal authentication
+ #
+ configuration-external-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/http.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-active-directory-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/krb5.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-saml-auth: |
+ LoadModule auth_mellon_module modules/mod_auth_mellon.so
+
+ <Location />
+ MellonEnable "info"
+
+ MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml"
+
+ MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key"
+ MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert"
+ MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml"
+
+ MellonVariable "sp-cookie"
+ MellonSecureCookie On
+ MellonCookiePath "/"
+
+ MellonIdP "IDP"
+
+ MellonEndpointPath "/saml2"
+
+ MellonUser username
+ MellonMergeEnvVars On
+
+ MellonSetEnvNoPrefix "REMOTE_USER" username
+ MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email
+ MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname
+ MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname
+ MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname
+ MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups
+ </Location>
+
+ <Location /saml_login>
+ AuthType "Mellon"
+ MellonEnable "auth"
+ Require valid-user
+ </Location>
+
+ Include "conf.d/external-auth-remote-user-conf"
+ external-auth-load-modules-conf: |
+ LoadModule authnz_pam_module modules/mod_authnz_pam.so
+ LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so
+ LoadModule lookup_identity_module modules/mod_lookup_identity.so
+ LoadModule auth_kerb_module modules/mod_auth_kerb.so
+ external-auth-login-form-conf: |
+ <Location /dashboard/external_authenticate>
+ InterceptFormPAMService httpd-auth
+ InterceptFormLogin user_name
+ InterceptFormPassword user_password
+ InterceptFormLoginSkip admin
+ InterceptFormClearRemoteUserForSkipped on
+ </Location>
+ external-auth-application-api-conf: |
+ <LocationMatch ^/api>
+ SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in
+ SetEnvIf X-Auth-Token '^.+$' let_api_token_in
+ SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in
+
+ AuthType Basic
+ AuthName "External Authentication (httpd) for API"
+ AuthBasicProvider PAM
+
+ AuthPAMService httpd-auth
+ Require valid-user
+ Order Allow,Deny
+ Allow from env=let_admin_in
+ Allow from env=let_api_token_in
+ Allow from env=let_sys_token_in
+ Satisfy Any
+ </LocationMatch>
+ external-auth-lookup-user-details-conf: |
+ <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api>
+ LookupUserAttr mail REMOTE_USER_EMAIL
+ LookupUserAttr givenname REMOTE_USER_FIRSTNAME
+ LookupUserAttr sn REMOTE_USER_LASTNAME
+ LookupUserAttr displayname REMOTE_USER_FULLNAME
+ LookupUserAttr domainname REMOTE_USER_DOMAIN
+
+ LookupUserGroups REMOTE_USER_GROUPS ":"
+ LookupDbusTimeout 5000
+ </LocationMatch>
+ external-auth-remote-user-conf: |
+ RequestHeader unset X_REMOTE_USER
+
+ RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER
+ RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR
+ RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL
+ RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME
+ RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME
+ RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME
+ RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS
+ RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN
- apiVersion: v1
kind: ConfigMap
metadata:
name: "${HTTPD_SERVICE_NAME}-auth-configs"
data:
auth-type: internal
+ auth-kerberos-realms: undefined
auth-configuration.conf: |
# External Authentication Configuration File
#
@@ -464,6 +625,20 @@ objects:
selector:
name: httpd
- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_DBUS_API_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server dbus api
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http-dbus-api
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: httpd
+- apiVersion: v1
kind: DeploymentConfig
metadata:
name: "${HTTPD_SERVICE_NAME}"
@@ -497,6 +672,9 @@ objects:
image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
ports:
- containerPort: 80
+ protocol: TCP
+ - containerPort: 8080
+ protocol: TCP
livenessProbe:
exec:
command:
@@ -526,6 +704,11 @@ objects:
configMapKeyRef:
name: "${HTTPD_SERVICE_NAME}-auth-configs"
key: auth-type
+ - name: HTTPD_AUTH_KERBEROS_REALMS
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-kerberos-realms
lifecycle:
postStart:
exec:
@@ -581,6 +764,11 @@ parameters:
displayName: Application Database Region
description: Database region that will be used for application.
value: '0'
+- name: APPLICATION_ADMIN_PASSWORD
+ displayName: Application Admin Password
+ required: true
+ description: Admin password that will be set on the application.
+ value: smartvm
- name: ANSIBLE_DATABASE_NAME
displayName: Ansible PostgreSQL database name
required: true
@@ -678,7 +866,7 @@ parameters:
- name: MEMCACHED_IMG_NAME
displayName: Memcached Image Name
description: This is the Memcached image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached
- name: MEMCACHED_IMG_TAG
displayName: Memcached Image Tag
description: This is the Memcached image tag/version requested to deploy.
@@ -686,11 +874,11 @@ parameters:
- name: FRONTEND_APPLICATION_IMG_NAME
displayName: Frontend Application Image Name
description: This is the Frontend Application image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui
- name: BACKEND_APPLICATION_IMG_NAME
displayName: Backend Application Image Name
description: This is the Backend Application image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app
- name: FRONTEND_APPLICATION_IMG_TAG
displayName: Front end Application Image Tag
description: This is the CloudForms Frontend Application image tag/version requested to deploy.
@@ -702,7 +890,7 @@ parameters:
- name: ANSIBLE_IMG_NAME
displayName: Ansible Image Name
description: This is the Ansible image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible
- name: ANSIBLE_IMG_TAG
displayName: Ansible Image Tag
description: This is the Ansible image tag/version requested to deploy.
@@ -730,10 +918,15 @@ parameters:
displayName: Apache httpd Service Name
description: The name of the OpenShift Service exposed for the httpd container.
value: httpd
+- name: HTTPD_DBUS_API_SERVICE_NAME
+ required: true
+ displayName: Apache httpd DBus API Service Name
+  description: The name of the httpd DBus API service.
+ value: httpd-dbus-api
- name: HTTPD_IMG_NAME
displayName: Apache httpd Image Name
description: This is the httpd image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd
- name: HTTPD_IMG_TAG
displayName: Apache httpd Image Tag
description: This is the httpd image tag/version requested to deploy.
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml
index d7c9f5af7..5c757b6c2 100644
--- a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml
@@ -31,6 +31,7 @@ objects:
name: "${NAME}-secrets"
stringData:
pg-password: "${DATABASE_PASSWORD}"
+ admin-password: "${APPLICATION_ADMIN_PASSWORD}"
database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
v2-key: "${V2_KEY}"
- apiVersion: v1
@@ -128,18 +129,173 @@ objects:
<VirtualHost *:80>
KeepAlive on
+ # Without ServerName mod_auth_mellon compares against http:// and not https:// from the IdP
+ ServerName https://%{REQUEST_HOST}
+
ProxyPreserveHost on
- ProxyPass /ws/ ws://${NAME}/ws/
- ProxyPassReverse /ws/ ws://${NAME}/ws/
- ProxyPass / http://${NAME}/
+
+ RewriteCond %{REQUEST_URI} ^/ws [NC]
+ RewriteCond %{HTTP:UPGRADE} ^websocket$ [NC]
+ RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC]
+ RewriteRule .* ws://${NAME}%{REQUEST_URI} [P,QSA,L]
+
+        # For httpd, some ErrorDocuments must be served by the httpd pod
+ RewriteCond %{REQUEST_URI} !^/proxy_pages
+
+ # For SAML /saml2 is only served by mod_auth_mellon in the httpd pod
+ RewriteCond %{REQUEST_URI} !^/saml2
+ RewriteRule ^/ http://${NAME}%{REQUEST_URI} [P,QSA,L]
ProxyPassReverse / http://${NAME}/
+
+ # Ensures httpd stdout/stderr are seen by docker logs.
+ ErrorLog "| /usr/bin/tee /proc/1/fd/2 /var/log/httpd/error_log"
+ CustomLog "| /usr/bin/tee /proc/1/fd/1 /var/log/httpd/access_log" common
</VirtualHost>
+ authentication.conf: |
+ # Load appropriate authentication configuration files
+ #
+ Include "conf.d/configuration-${HTTPD_AUTH_TYPE}-auth"
+ configuration-internal-auth: |
+ # Internal authentication
+ #
+ configuration-external-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/http.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-active-directory-auth: |
+ Include "conf.d/external-auth-load-modules-conf"
+
+ <Location /dashboard/kerberos_authenticate>
+ AuthType Kerberos
+ AuthName "Kerberos Login"
+ KrbMethodNegotiate On
+ KrbMethodK5Passwd Off
+ KrbAuthRealms ${HTTPD_AUTH_KERBEROS_REALMS}
+ Krb5KeyTab /etc/krb5.keytab
+ KrbServiceName Any
+ Require pam-account httpd-auth
+
+ ErrorDocument 401 /proxy_pages/invalid_sso_credentials.js
+ </Location>
+
+ Include "conf.d/external-auth-login-form-conf"
+ Include "conf.d/external-auth-application-api-conf"
+ Include "conf.d/external-auth-lookup-user-details-conf"
+ Include "conf.d/external-auth-remote-user-conf"
+ configuration-saml-auth: |
+ LoadModule auth_mellon_module modules/mod_auth_mellon.so
+
+ <Location />
+ MellonEnable "info"
+
+ MellonIdPMetadataFile "/etc/httpd/saml2/idp-metadata.xml"
+
+ MellonSPPrivateKeyFile "/etc/httpd/saml2/sp-key.key"
+ MellonSPCertFile "/etc/httpd/saml2/sp-cert.cert"
+ MellonSPMetadataFile "/etc/httpd/saml2/sp-metadata.xml"
+
+ MellonVariable "sp-cookie"
+ MellonSecureCookie On
+ MellonCookiePath "/"
+
+ MellonIdP "IDP"
+
+ MellonEndpointPath "/saml2"
+
+ MellonUser username
+ MellonMergeEnvVars On
+
+ MellonSetEnvNoPrefix "REMOTE_USER" username
+ MellonSetEnvNoPrefix "REMOTE_USER_EMAIL" email
+ MellonSetEnvNoPrefix "REMOTE_USER_FIRSTNAME" firstname
+ MellonSetEnvNoPrefix "REMOTE_USER_LASTNAME" lastname
+ MellonSetEnvNoPrefix "REMOTE_USER_FULLNAME" fullname
+ MellonSetEnvNoPrefix "REMOTE_USER_GROUPS" groups
+ </Location>
+
+ <Location /saml_login>
+ AuthType "Mellon"
+ MellonEnable "auth"
+ Require valid-user
+ </Location>
+
+ Include "conf.d/external-auth-remote-user-conf"
+ external-auth-load-modules-conf: |
+ LoadModule authnz_pam_module modules/mod_authnz_pam.so
+ LoadModule intercept_form_submit_module modules/mod_intercept_form_submit.so
+ LoadModule lookup_identity_module modules/mod_lookup_identity.so
+ LoadModule auth_kerb_module modules/mod_auth_kerb.so
+ external-auth-login-form-conf: |
+ <Location /dashboard/external_authenticate>
+ InterceptFormPAMService httpd-auth
+ InterceptFormLogin user_name
+ InterceptFormPassword user_password
+ InterceptFormLoginSkip admin
+ InterceptFormClearRemoteUserForSkipped on
+ </Location>
+ external-auth-application-api-conf: |
+ <LocationMatch ^/api>
+ SetEnvIf Authorization '^Basic +YWRtaW46' let_admin_in
+ SetEnvIf X-Auth-Token '^.+$' let_api_token_in
+ SetEnvIf X-MIQ-Token '^.+$' let_sys_token_in
+
+ AuthType Basic
+ AuthName "External Authentication (httpd) for API"
+ AuthBasicProvider PAM
+
+ AuthPAMService httpd-auth
+ Require valid-user
+ Order Allow,Deny
+ Allow from env=let_admin_in
+ Allow from env=let_api_token_in
+ Allow from env=let_sys_token_in
+ Satisfy Any
+ </LocationMatch>
+ external-auth-lookup-user-details-conf: |
+ <LocationMatch ^/dashboard/external_authenticate$|^/dashboard/kerberos_authenticate$|^/api>
+ LookupUserAttr mail REMOTE_USER_EMAIL
+ LookupUserAttr givenname REMOTE_USER_FIRSTNAME
+ LookupUserAttr sn REMOTE_USER_LASTNAME
+ LookupUserAttr displayname REMOTE_USER_FULLNAME
+ LookupUserAttr domainname REMOTE_USER_DOMAIN
+
+ LookupUserGroups REMOTE_USER_GROUPS ":"
+ LookupDbusTimeout 5000
+ </LocationMatch>
+ external-auth-remote-user-conf: |
+ RequestHeader unset X_REMOTE_USER
+
+ RequestHeader set X_REMOTE_USER %{REMOTE_USER}e env=REMOTE_USER
+ RequestHeader set X_EXTERNAL_AUTH_ERROR %{EXTERNAL_AUTH_ERROR}e env=EXTERNAL_AUTH_ERROR
+ RequestHeader set X_REMOTE_USER_EMAIL %{REMOTE_USER_EMAIL}e env=REMOTE_USER_EMAIL
+ RequestHeader set X_REMOTE_USER_FIRSTNAME %{REMOTE_USER_FIRSTNAME}e env=REMOTE_USER_FIRSTNAME
+ RequestHeader set X_REMOTE_USER_LASTNAME %{REMOTE_USER_LASTNAME}e env=REMOTE_USER_LASTNAME
+ RequestHeader set X_REMOTE_USER_FULLNAME %{REMOTE_USER_FULLNAME}e env=REMOTE_USER_FULLNAME
+ RequestHeader set X_REMOTE_USER_GROUPS %{REMOTE_USER_GROUPS}e env=REMOTE_USER_GROUPS
+ RequestHeader set X_REMOTE_USER_DOMAIN %{REMOTE_USER_DOMAIN}e env=REMOTE_USER_DOMAIN
- apiVersion: v1
kind: ConfigMap
metadata:
name: "${HTTPD_SERVICE_NAME}-auth-configs"
data:
auth-type: internal
+ auth-kerberos-realms: undefined
auth-configuration.conf: |
# External Authentication Configuration File
#
@@ -203,15 +359,15 @@ objects:
- name: cloudforms
image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
livenessProbe:
- tcpSocket:
- port: 80
+ exec:
+ command:
+ - pidof
+ - MIQ Server
initialDelaySeconds: 480
timeoutSeconds: 3
readinessProbe:
- httpGet:
- path: "/"
+ tcpSocket:
port: 80
- scheme: HTTP
initialDelaySeconds: 200
timeoutSeconds: 3
ports:
@@ -239,6 +395,11 @@ objects:
secretKeyRef:
name: "${NAME}-secrets"
key: v2-key
+ - name: APPLICATION_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: admin-password
- name: ANSIBLE_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
@@ -611,6 +772,20 @@ objects:
selector:
name: httpd
- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_DBUS_API_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server dbus api
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http-dbus-api
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: httpd
+- apiVersion: v1
kind: DeploymentConfig
metadata:
name: "${HTTPD_SERVICE_NAME}"
@@ -644,6 +819,9 @@ objects:
image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
ports:
- containerPort: 80
+ protocol: TCP
+ - containerPort: 8080
+ protocol: TCP
livenessProbe:
exec:
command:
@@ -673,6 +851,11 @@ objects:
configMapKeyRef:
name: "${HTTPD_SERVICE_NAME}-auth-configs"
key: auth-type
+ - name: HTTPD_AUTH_KERBEROS_REALMS
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-kerberos-realms
lifecycle:
postStart:
exec:
@@ -718,6 +901,11 @@ parameters:
displayName: Application Database Region
description: Database region that will be used for application.
value: '0'
+- name: APPLICATION_ADMIN_PASSWORD
+ displayName: Application Admin Password
+ required: true
+ description: Admin password that will be set on the application.
+ value: smartvm
- name: ANSIBLE_DATABASE_NAME
displayName: Ansible PostgreSQL database name
required: true
@@ -842,7 +1030,7 @@ parameters:
- name: POSTGRESQL_IMG_NAME
displayName: PostgreSQL Image Name
description: This is the PostgreSQL image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-postgresql
- name: POSTGRESQL_IMG_TAG
displayName: PostgreSQL Image Tag
description: This is the PostgreSQL image tag/version requested to deploy.
@@ -850,7 +1038,7 @@ parameters:
- name: MEMCACHED_IMG_NAME
displayName: Memcached Image Name
description: This is the Memcached image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-memcached
- name: MEMCACHED_IMG_TAG
displayName: Memcached Image Tag
description: This is the Memcached image tag/version requested to deploy.
@@ -858,11 +1046,11 @@ parameters:
- name: FRONTEND_APPLICATION_IMG_NAME
displayName: Frontend Application Image Name
description: This is the Frontend Application image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app-ui
- name: BACKEND_APPLICATION_IMG_NAME
displayName: Backend Application Image Name
description: This is the Backend Application image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-app
- name: FRONTEND_APPLICATION_IMG_TAG
displayName: Front end Application Image Tag
description: This is the CloudForms Frontend Application image tag/version requested to deploy.
@@ -874,7 +1062,7 @@ parameters:
- name: ANSIBLE_IMG_NAME
displayName: Ansible Image Name
description: This is the Ansible image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-embedded-ansible
- name: ANSIBLE_IMG_TAG
displayName: Ansible Image Tag
description: This is the Ansible image tag/version requested to deploy.
@@ -907,10 +1095,15 @@ parameters:
displayName: Apache httpd Service Name
description: The name of the OpenShift Service exposed for the httpd container.
value: httpd
+- name: HTTPD_DBUS_API_SERVICE_NAME
+ required: true
+ displayName: Apache httpd DBus API Service Name
+ description: The name of the OpenShift Service exposed for the httpd DBus API.
+ value: httpd-dbus-api
- name: HTTPD_IMG_NAME
displayName: Apache httpd Image Name
description: This is the httpd image name requested to deploy.
- value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd
+ value: registry.access.redhat.com/cloudforms46-beta/cfme-openshift-httpd
- name: HTTPD_IMG_TAG
displayName: Apache httpd Image Tag
description: This is the httpd image tag/version requested to deploy.
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index 057963c1a..30fdde94c 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -16,9 +16,7 @@
apply -f {{ file_name }}
-n {{namespace}}
register: generation_apply
- failed_when:
- - "'error' in generation_apply.stderr"
- - "generation_apply.rc != 0"
+ failed_when: "'error' in generation_apply.stderr or (generation_apply.rc | int != 0)"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -30,7 +28,5 @@
register: version_changed
vars:
init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
- failed_when:
- - "'error' in version_changed.stderr"
- - "version_changed.rc != 0"
+ failed_when: "'error' in version_changed.stderr or version_changed.rc | int != 0"
changed_when: version_changed.stdout | int > init_version | int
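Note on the failed_when rewrite in this hunk: Ansible combines a list of failed_when conditions with a logical AND, so the old form only failed the task when the error string appeared and the return code was non-zero; the single or-expression fails when either holds. A minimal Python illustration of the difference (not Ansible internals; the sample stderr value is made up):

def old_failed_when(stderr, rc):
    # list form: every condition must be true for the task to fail
    return ('error' in stderr) and (rc != 0)

def new_failed_when(stderr, rc):
    # single expression: either condition marks the task failed
    return ('error' in stderr) or (int(rc) != 0)

# oc apply exits 0 but prints an error: only the new form flags a failure
print(old_failed_when('error: unable to recognize resource', 0))  # False
print(new_failed_when('error: unable to recognize resource', 0))  # True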
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0b10413c5..5864d3c03 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -77,6 +77,18 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+openshift_node_syscon_auth_mounts_l:
+- type: bind
+ source: "{{ oreg_auth_credentials_path }}"
+ destination: "/root/.docker"
+ options:
+ - ro
+
+# Additional mounts to add in the future, or user-provided data mounts.
+# Each entry should be in the same format as auth_mounts_l above.
+openshift_node_syscon_add_mounts_l: []
+
+
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
openshift_node_image_dict:
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 06b879050..008f209d7 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -14,4 +14,23 @@
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- "MASTER_SERVICE={{ openshift_service_type }}.service"
+ - 'ADDTL_MOUNTS={{ l_node_syscon_add_mounts2 }}'
state: latest
+ vars:
+ # We need to evaluate some variables here to ensure
+ # l_bind_docker_reg_auth is evaluated after registry_auth.yml has been
+ # processed.
+
+ # Determine whether to include the auth credentials mount.
+ l_node_syscon_auth_mounts_l: "{{ l_bind_docker_reg_auth | ternary(openshift_node_syscon_auth_mounts_l,[]) }}"
+
+ # Join any user-provided mounts and auth_mounts into a combined list.
+ l_node_syscon_add_mounts_l: "{{ openshift_node_syscon_add_mounts_l | union(l_node_syscon_auth_mounts_l) }}"
+
+ # We must prepend a ',' here to ensure the value is inserted properly into an
+ # existing json list in the container's config.json
+ # lib_utils_oo_l_of_d_to_csv is a custom filter plugin in roles/lib_utils/filter_plugins/oo_filters.py
+ l_node_syscon_add_mounts: ",{{ l_node_syscon_add_mounts_l | lib_utils_oo_l_of_d_to_csv }}"
+ # If we have just a ',', then both mount lists were empty and we don't want to
+ # add anything to config.json.
+ l_node_syscon_add_mounts2: "{{ (l_node_syscon_add_mounts != ',') | bool | ternary(l_node_syscon_add_mounts,'') }}"
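For context on the ADDTL_MOUNTS assembly above, a rough sketch of what lib_utils_oo_l_of_d_to_csv is assumed to do: serialize each mount dict to a JSON fragment and join the fragments with commas so the result can be spliced into the existing JSON list in the container's config.json (see roles/lib_utils/filter_plugins/oo_filters.py for the real implementation; the source path below is only an example value for oreg_auth_credentials_path):

import json

def l_of_d_to_csv(mounts):
    # assumed behaviour: each dict becomes a JSON fragment, joined by commas
    return ','.join(json.dumps(m) for m in mounts)

auth_mounts = [{
    'type': 'bind',
    'source': '/var/lib/origin/.docker',   # example oreg_auth_credentials_path
    'destination': '/root/.docker',
    'options': ['ro'],
}]

# prepend ',' only when there is something to append, mirroring the vars above
addtl_mounts = ',' + l_of_d_to_csv(auth_mounts) if auth_mounts else ''
print(addtl_mounts)  # ,{"type": "bind", ...} (key order may vary)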
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 5f2a94ea2..7d817463c 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -32,7 +32,7 @@ masterClientConnectionOverrides:
contentType: application/vnd.kubernetes.protobuf
burst: 200
qps: 100
-masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
+masterKubeConfig: system:node:{{ openshift.common.hostname | lower }}.kubeconfig
{% if openshift_node_use_openshift_sdn | bool %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 5f73f3bdc..13d9fd718 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -18,9 +18,9 @@
stat:
path: "{{ openshift.common.config_base }}/node/{{ item }}"
with_items:
- - "system:node:{{ openshift.common.hostname }}.crt"
- - "system:node:{{ openshift.common.hostname }}.key"
- - "system:node:{{ openshift.common.hostname }}.kubeconfig"
+ - "system:node:{{ openshift.common.hostname | lower }}.crt"
+ - "system:node:{{ openshift.common.hostname | lower }}.key"
+ - "system:node:{{ openshift.common.hostname | lower }}.kubeconfig"
- ca.crt
- server.key
- server.crt
@@ -59,16 +59,16 @@
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
--certificate-authority={{ openshift_ca_cert }}
- --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}
+ --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}
--groups=system:nodes
--master={{ hostvars[openshift_ca_host].openshift.master.api_url }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
- --user=system:node:{{ hostvars[item].openshift.common.hostname }}
+ --user=system:node:{{ hostvars[item].openshift.common.hostname | lower }}
--expire-days={{ openshift_node_cert_expire_days }}
args:
- creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}"
with_items: "{{ hostvars
| lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
| lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
@@ -78,16 +78,16 @@
- name: Generate the node server certificate
command: >
{{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
- --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
- --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
+ --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt
+ --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.key
--expire-days={{ openshift_node_cert_expire_days }}
--overwrite=true
- --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
+ --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.hostname | lower }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.public_hostname | lower }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
args:
- creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt"
with_items: "{{ hostvars
| lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
| lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
diff --git a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml
index 17ad8106d..12a6d3f94 100644
--- a/roles/openshift_node_certificates/vars/main.yml
+++ b/roles/openshift_node_certificates/vars/main.yml
@@ -1,7 +1,7 @@
---
openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
openshift_node_cert_dir: "{{ openshift.common.config_base }}/node"
-openshift_node_cert_subdir: "node-{{ openshift.common.hostname }}"
+openshift_node_cert_subdir: "node-{{ openshift.common.hostname | lower }}"
openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
openshift_node_generated_config_dir: "{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }}"
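The "| lower" filters added in the node template and certificate tasks above keep the generated certificate, kubeconfig and directory names consistent, presumably to match the node name the kubelet registers, since node names are lower-cased even when the host reports a mixed-case hostname. A small illustration (the hostname is a made-up example):

hostname = 'Node01.Example.COM'
node_name = hostname.lower()
# file and directory names derived from the lower-cased node name
print('system:node:{}.kubeconfig'.format(node_name))  # system:node:node01.example.com.kubeconfig
print('node-{}'.format(node_name))                    # node-node01.example.com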
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index 239e1f1cc..27c8a4b81 100644
--- a/roles/openshift_provisioners/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -15,9 +15,7 @@
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when:
- - "'error' in generation_apply.stderr"
- - "generation_apply.rc != 0"
+ failed_when: "'error' in generation_apply.stderr or generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -38,9 +36,7 @@
delete -f {{ file_name }}
-n {{ namespace }}
register: generation_delete
- failed_when:
- - "'error' in generation_delete.stderr"
- - "generation_delete.rc != 0"
+ failed_when: "'error' in generation_delete.stderr or generation_delete.rc != 0"
changed_when: generation_delete.rc == 0
when: generation_apply.rc != 0
@@ -50,8 +46,6 @@
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when:
- - "'error' in generation_apply.stderr"
- - "generation_apply.rc != 0"
+ failed_when: "'error' in generation_apply.stderr or generation_apply.rc | int != 0"
changed_when: generation_apply.rc == 0
when: generation_apply.rc != 0
diff --git a/roles/openshift_web_console/files/console-config.yaml b/roles/openshift_web_console/files/console-config.yaml
index 32a28775f..55c650fbe 100644
--- a/roles/openshift_web_console/files/console-config.yaml
+++ b/roles/openshift_web_console/files/console-config.yaml
@@ -12,6 +12,7 @@ extensions:
properties: null
features:
inactivityTimeoutMinutes: 0
+ clusterResourceOverridesEnabled: false
servingInfo:
bindAddress: 0.0.0.0:8443
bindNetwork: tcp4
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index ead62799a..cc5eef47d 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -86,6 +86,8 @@
value: "{{ openshift.master.logout_url | default('') }}"
- key: features#inactivityTimeoutMinutes
value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+ - key: features#clusterResourceOverridesEnabled
+ value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(false) }}"
- key: extensions#scriptURLs
value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
- key: extensions#stylesheetURLs
diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
index 4d2957977..967222ea4 100644
--- a/roles/openshift_web_console/tasks/update_console_config.yml
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -19,43 +19,49 @@
# value: "https://{{ openshift_logging_kibana_hostname }}"
# when: openshift_web_console_install | default(true) | bool
-- name: Read web console config map
+- name: Read the existing web console config map
oc_configmap:
namespace: openshift-web-console
name: webconsole-config
state: list
- register: webconsole_config
-
-- name: Make temp directory
- command: mktemp -d /tmp/console-ansible-XXXXXX
- register: mktemp_console
- changed_when: False
-
-- name: Copy web console config to temp file
- copy:
- content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}"
- dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
-
-- name: Change web console config properties
- yedit:
- src: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
- edits: "{{console_config_edits}}"
- separator: '#'
- state: present
-
-- name: Update web console config map
- oc_configmap:
- namespace: openshift-web-console
- name: webconsole-config
- state: present
- from_file:
- webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
-
-- name: Remove temp directory
- file:
- state: absent
- name: "{{ mktemp_console.stdout }}"
- changed_when: False
-
-# TODO: Only rollout if config has changed.
-- include_tasks: rollout_console.yml
+ register: webconsole_config_map
+
+- set_fact:
+ existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}"
+
+- when: existing_config_map_data['webconsole-config.yaml'] is defined
+ block:
+ - name: Make temp directory
+ command: mktemp -d /tmp/console-ansible-XXXXXX
+ register: mktemp_console
+ changed_when: False
+
+ - name: Copy the existing web console config to temp directory
+ copy:
+ content: "{{ existing_config_map_data['webconsole-config.yaml'] }}"
+ dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+
+ - name: Change web console config properties
+ yedit:
+ src: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+ edits: "{{console_config_edits}}"
+ separator: '#'
+ state: present
+
+ - name: Update web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: present
+ from_file:
+ webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+ register: update_console_config_map
+
+ - name: Remove temp directory
+ file:
+ state: absent
+ name: "{{ mktemp_console.stdout }}"
+ changed_when: False
+
+ - include_tasks: rollout_console.yml
+ when: update_console_config_map.changed | bool
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index a85a43bd3..eb42721b5 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -820,7 +820,7 @@ http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
click.echo(message)
-@click.group()
+@click.group(context_settings=dict(max_content_width=120))
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
@@ -932,7 +932,7 @@ def uninstall(ctx):
openshift_ansible.run_uninstall_playbook(hosts, verbose)
-@click.command()
+@click.command(context_settings=dict(max_content_width=120))
@click.option('--latest-minor', '-l', is_flag=True, default=False)
@click.option('--next-major', '-n', is_flag=True, default=False)
@click.pass_context
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 216664cd0..84a76fa53 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -122,7 +122,7 @@ def write_inventory_vars(base_inventory, lb):
if CFG.deployment.variables['ansible_ssh_user'] != 'root':
base_inventory.write('ansible_become=yes\n')
- base_inventory.write('openshift_override_hostname_check=true\n')
+ base_inventory.write('openshift_hostname_check=false\n')
if lb is not None:
base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))