-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  bin/README_SHELL_COMPLETION | 37
-rwxr-xr-x  bin/ohi | 147
-rw-r--r--  bin/openshift_ansible.conf.example | 6
-rw-r--r--  bin/openshift_ansible/__init__.py | 0
l---------  bin/openshift_ansible/aws | 1
-rw-r--r--  bin/openshift_ansible/awsutil.py | 268
l---------  bin/openshift_ansible/multi_inventory.py | 1
-rw-r--r--  bin/openshift_ansible/utils.py | 30
-rwxr-xr-x  bin/opscp | 151
-rwxr-xr-x  bin/opssh | 154
-rwxr-xr-x  bin/oscp | 184
-rwxr-xr-x  bin/ossh | 172
-rwxr-xr-x  bin/ossh_bash_completion | 52
-rw-r--r--  bin/ossh_zsh_completion | 31
-rw-r--r--  bin/zsh_functions/_ossh | 49
-rw-r--r--  filter_plugins/oo_filters.py | 5
-rwxr-xr-x  git/pylint.sh | 1
-rw-r--r--  inventory/byo/hosts.aep.example | 2
-rw-r--r--  inventory/byo/hosts.origin.example | 3
-rw-r--r--  inventory/byo/hosts.ose.example | 3
-rw-r--r--  openshift-ansible.spec | 5
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 20
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 5
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 21
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 2
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 5
l---------  playbooks/common/openshift-loadbalancer/filter_plugins | 1
l---------  playbooks/common/openshift-loadbalancer/lookup_plugins | 1
l---------  playbooks/common/openshift-loadbalancer/roles | 1
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 20
-rw-r--r--  playbooks/common/openshift-master/config.yml | 143
-rw-r--r--  playbooks/common/openshift-node/config.yml | 70
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 21
-rw-r--r--  playbooks/gce/openshift-cluster/library/gce.py | 543
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 5
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 21
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 20
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 18
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 19
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 8
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 21
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 1
-rw-r--r--  roles/haproxy/tasks/main.yml | 43
-rw-r--r--  roles/openshift_ansible_inventory/README.md | 41
-rw-r--r--  roles/openshift_ansible_inventory/defaults/main.yml | 4
-rw-r--r--  roles/openshift_ansible_inventory/handlers/main.yml | 2
-rw-r--r--  roles/openshift_ansible_inventory/meta/main.yml | 8
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 47
-rw-r--r--  roles/openshift_ansible_inventory/vars/main.yml | 2
-rw-r--r--  roles/openshift_ca/README.md | 48
-rw-r--r--  roles/openshift_ca/meta/main.yml (renamed from roles/openshift_master_ca/meta/main.yml) | 8
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 56
-rw-r--r--  roles/openshift_ca/vars/main.yml | 6
-rw-r--r--  roles/openshift_clock/meta/main.yml | 15
-rw-r--r--  roles/openshift_clock/tasks/main.yaml | 14
-rw-r--r--  roles/openshift_etcd/meta/main.yml | 1
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 19
-rw-r--r--  roles/openshift_loadbalancer/README.md (renamed from roles/haproxy/README.md) | 4
-rw-r--r--  roles/openshift_loadbalancer/defaults/main.yml (renamed from roles/haproxy/defaults/main.yml) | 0
-rw-r--r--  roles/openshift_loadbalancer/handlers/main.yml (renamed from roles/haproxy/handlers/main.yml) | 0
-rw-r--r--  roles/openshift_loadbalancer/meta/main.yml (renamed from roles/haproxy/meta/main.yml) | 7
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml | 73
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.cfg.j2 (renamed from roles/haproxy/templates/haproxy.cfg.j2) | 8
-rw-r--r--  roles/openshift_master/meta/main.yml | 2
-rw-r--r--  roles/openshift_master_ca/README.md | 34
-rw-r--r--  roles/openshift_master_certificates/README.md | 29
-rw-r--r--  roles/openshift_master_certificates/meta/main.yml | 6
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 123
-rw-r--r--  roles/openshift_master_certificates/vars/main.yml | 2
-rw-r--r--  roles/openshift_node/meta/main.yml | 3
-rw-r--r--  roles/openshift_node_certificates/README.md | 33
-rw-r--r--  roles/openshift_node_certificates/meta/main.yml | 6
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 97
-rw-r--r--  roles/openshift_node_certificates/vars/main.yml | 9
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 5
-rw-r--r--  test/env-setup | 8
-rw-r--r--  test/units/README.md | 7
-rwxr-xr-x  test/units/multi_inventory_test.py | 114
-rwxr-xr-x  test/units/yedit_test.py | 143
84 files changed, 1255 insertions, 2049 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 137f6636b..9e57340b4 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.93-1 ./
+3.0.94-1 ./
diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION
deleted file mode 100644
index 49bba3acc..000000000
--- a/bin/README_SHELL_COMPLETION
+++ /dev/null
@@ -1,37 +0,0 @@
-# completion is available for ossh/oscp
-
-ossh/oscp uses a dynamic inventory cache in order to lookup
-hostnames and translate them to something meaningful
-such as an IP address or dns name.
-
-This allows us to treat our servers as cattle and not as pets.
-
-If you have not run the ossh command and it has not laid down
-a cache file the completions will not be available.
-
-You can populate the cache by running `ossh --list`. This
-will populate the cache file and the completions should
-become available.
-
-This script will look at the cached version of your
-multi_inventory results in ~/.ansible/tmp/multi_inventory.cache.
-It will then parse a few {host}.{env} out of the json
-and return them to be completable.
-
-# BASH
-In order to setup bash completion, source the following script:
-/path/to/repository/openshift-ansible/bin/ossh_bash_completion
-
-# ZSH
-In order to setup zsh completion, you will need to verify
-that the _ossh_zsh_completion script is somewhere in the path
-of $fpath.
-
-Once $fpath includes the _ossh_zsh_completion script then you should
-run `exec zsh`. This will then allow you to call `ossh host[TAB]`
-for a list of completions.
-
-Before completing the final step, zsh keeps its own cache in
-~/.zcompdump of the known functions and variables. In order to
-refresh with new variables and completion arrays you might need
-to `rm ~/.zcompdump` before running `exec zsh`.
diff --git a/bin/ohi b/bin/ohi
deleted file mode 100755
index 9c2ce8432..000000000
--- a/bin/ohi
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-'''
-Ohi = Openshift Host Inventory
-
-This script provides an easy way to look at your host inventory.
-
-This depends on multi_inventory being setup correctly.
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import sys
-import os
-import ConfigParser
-
-from openshift_ansible import awsutil
-from openshift_ansible import utils
-from openshift_ansible.awsutil import ArgumentError
-
-CONFIG_MAIN_SECTION = 'main'
-CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
-
-
-class Ohi(object):
- '''
- Class for managing openshift host inventory
- '''
- def __init__(self):
- self.host_type_aliases = {}
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.args = None
- self.parse_cli_args()
- self.parse_config_file()
-
- self.aws = awsutil.AwsUtil(self.host_type_aliases)
-
- def run(self):
- '''
- Call into awsutil and retrieve the desired hosts and environments
- '''
-
- if self.args.list_host_types:
- self.aws.print_host_types()
- return 0
-
- if self.args.v3:
- version = '3'
- elif self.args.all_versions:
- version = 'all'
- else:
- version = '2'
-
- hosts = self.aws.get_host_list(clusters=self.args.cluster,
- host_type=self.args.host_type,
- sub_host_type=self.args.sub_host_type,
- envs=self.args.env,
- version=version,
- cached=self.args.cache_only)
-
- if hosts is None:
- # We weren't able to determine what they wanted to do
- raise ArgumentError("Invalid combination of arguments")
-
- if self.args.ip:
- hosts = self.aws.convert_to_ip(hosts)
-
- for host in sorted(hosts, key=utils.normalize_dnsname):
- if self.args.user:
- print "%s@%s" % (self.args.user, host)
- else:
- print host
-
- return 0
-
- def parse_config_file(self):
- '''
- Parse the config file for ohi
- '''
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- self.host_type_aliases = {}
- if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
- for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
- value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
- self.host_type_aliases[alias] = value
-
- def parse_cli_args(self):
- """Setup the command line parser with the options we want
- """
-
- parser = argparse.ArgumentParser(description='OpenShift Host Inventory')
-
- parser.add_argument('--list-host-types', default=False, action='store_true', help='List all of the host types')
- parser.add_argument('--list', default=False, action='store_true', help='List all hosts')
-
- parser.add_argument('-c', '--cluster', action="append", help="Which clusterid to use")
- parser.add_argument('-e', '--env', action="append", help="Which environment to use")
-
- parser.add_argument('-t', '--host-type', action="store", help="Which host type to use")
-
- parser.add_argument('-s', '--sub-host-type', action="store", help="Which sub host type to use")
-
- parser.add_argument('-l', '--user', action='store', default=None, help='username')
-
- parser.add_argument('--cache-only', action='store_true', default=False,
- help='Retrieve the host inventory by cache only. Default is false.')
-
- parser.add_argument('--v2', action='store_true', default=True,
- help='Specify the openshift version. Default is 2')
-
- parser.add_argument('--v3', action='store_true', default=False,
- help='Specify the openshift version.')
-
- parser.add_argument('--ip', action='store_true', default=False,
- help='Return ip address only.')
-
- parser.add_argument('--all-versions', action='store_true', default=False,
- help='Specify the openshift version. Return all versions')
-
- self.args = parser.parse_args()
-
-def main():
- '''
- Ohi will do its work here
- '''
- if len(sys.argv) == 1:
- print "\nError: No options given. Use --help to see the available options\n"
- sys.exit(0)
-
- try:
- ohi = Ohi()
- exitcode = ohi.run()
- sys.exit(exitcode)
- except ArgumentError as err:
- print "\nError: %s\n" % err.message
-
-if __name__ == '__main__':
- main()
-
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
deleted file mode 100644
index 8786dfc13..000000000
--- a/bin/openshift_ansible.conf.example
+++ /dev/null
@@ -1,6 +0,0 @@
-#[main]
-#inventory = /usr/share/ansible/inventory/multi_inventory.py
-
-#[host_type_aliases]
-#host-type-one = aliasa,aliasb
-#host-type-two = aliasfortwo
diff --git a/bin/openshift_ansible/__init__.py b/bin/openshift_ansible/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/bin/openshift_ansible/__init__.py
+++ /dev/null
diff --git a/bin/openshift_ansible/aws b/bin/openshift_ansible/aws
deleted file mode 120000
index eb0575b4d..000000000
--- a/bin/openshift_ansible/aws
+++ /dev/null
@@ -1 +0,0 @@
-../../inventory/aws/
\ No newline at end of file
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
deleted file mode 100644
index 11651f087..000000000
--- a/bin/openshift_ansible/awsutil.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-"""This module comprises Aws specific utility functions."""
-
-import os
-import re
-
-# Buildbot does not have multi_inventory installed
-#pylint: disable=no-name-in-module
-from openshift_ansible import multi_inventory
-
-class ArgumentError(Exception):
- """This class is raised when improper arguments are passed."""
-
- def __init__(self, message):
- """Initialize an ArgumentError.
-
- Keyword arguments:
- message -- the exact error message being raised
- """
- super(ArgumentError, self).__init__()
- self.message = message
-
-class AwsUtil(object):
- """This class contains the AWS utility functions."""
-
- def __init__(self, host_type_aliases=None):
- """Initialize the AWS utility class.
-
- Keyword arguments:
- host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
- """
-
- self.alias_lookup = {}
- host_type_aliases = host_type_aliases or {}
-
- self.host_type_aliases = host_type_aliases
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- self.setup_host_type_alias_lookup()
-
- def setup_host_type_alias_lookup(self):
- """Sets up the alias to host-type lookup table."""
- for key, values in self.host_type_aliases.iteritems():
- for value in values:
- self.alias_lookup[value] = key
-
- @staticmethod
- def get_inventory(args=None, cached=False):
- """Calls the inventory script and returns a dictionary containing the inventory."
-
- Keyword arguments:
- args -- optional arguments to pass to the inventory script
- """
- minv = multi_inventory.MultiInventory(args)
- if cached:
- minv.get_inventory_from_cache()
- else:
- minv.run()
- return minv.result
-
- def get_clusters(self):
- """Searches for cluster tags in the inventory and returns all of the clusters found."""
- pattern = re.compile(r'^oo_clusterid_(.*)')
-
- clusters = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- clusters.append(matched.group(1))
-
- clusters.sort()
- return clusters
-
- def get_environments(self):
- """Searches for env tags in the inventory and returns all of the envs found."""
- pattern = re.compile(r'^oo_environment_(.*)')
-
- envs = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- envs.append(matched.group(1))
-
- envs.sort()
- return envs
-
- def get_host_types(self):
- """Searches for host-type tags in the inventory and returns all host-types found."""
- pattern = re.compile(r'^oo_hosttype_(.*)')
-
- host_types = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- host_types.append(matched.group(1))
-
- host_types.sort()
- return host_types
-
- def get_sub_host_types(self):
- """Searches for sub-host-type tags in the inventory and returns all sub-host-types found."""
- pattern = re.compile(r'^oo_subhosttype_(.*)')
-
- sub_host_types = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- sub_host_types.append(matched.group(1))
-
- sub_host_types.sort()
- return sub_host_types
-
- def get_security_groups(self):
- """Searches for security_groups in the inventory and returns all SGs found."""
- pattern = re.compile(r'^security_group_(.*)')
-
- groups = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- groups.append(matched.group(1))
-
- groups.sort()
- return groups
-
- def build_host_dict_by_env(self, args=None):
- """Searches the inventory for hosts in an env and returns their hostvars."""
- args = args or []
- inv = self.get_inventory(args)
-
- inst_by_env = {}
- for _, host in inv['_meta']['hostvars'].items():
- # If you don't have an environment tag, we're going to ignore you
- if 'oo_environment' not in host:
- continue
-
- if host['oo_environment'] not in inst_by_env:
- inst_by_env[host['oo_environment']] = {}
- host_id = "%s:%s" % (host['oo_name'], host['oo_id'])
- inst_by_env[host['oo_environment']][host_id] = host
-
- return inst_by_env
-
- def print_host_types(self):
- """Gets the list of host types and aliases and outputs them in columns."""
- host_types = self.get_host_types()
- ht_format_str = "%35s"
- alias_format_str = "%-20s"
- combined_format_str = ht_format_str + " " + alias_format_str
-
- print
- print combined_format_str % ('Host Types', 'Aliases')
- print combined_format_str % ('----------', '-------')
-
- for host_type in host_types:
- aliases = []
- if host_type in self.host_type_aliases:
- aliases = self.host_type_aliases[host_type]
- print combined_format_str % (host_type, ", ".join(aliases))
- else:
- print ht_format_str % host_type
- print
-
- def resolve_host_type(self, host_type):
- """Converts a host-type alias into a host-type.
-
- Keyword arguments:
- host_type -- The alias or host_type to look up.
-
- Example (depends on aliases defined in config file):
- host_type = ex-node
- returns: openshift-node
- """
- if self.alias_lookup.has_key(host_type):
- return self.alias_lookup[host_type]
- return host_type
-
- @staticmethod
- def gen_version_tag(ver):
- """Generate the version tag
- """
- return "oo_version_%s" % ver
-
- @staticmethod
- def gen_clusterid_tag(clu):
- """Generate the clusterid tag
- """
- return "oo_clusterid_%s" % clu
-
- @staticmethod
- def gen_env_tag(env):
- """Generate the environment tag
- """
- return "oo_environment_%s" % env
-
- def gen_host_type_tag(self, host_type, version):
- """Generate the host type tag
- """
- if version == '2':
- host_type = self.resolve_host_type(host_type)
- return "oo_hosttype_%s" % host_type
-
- @staticmethod
- def gen_sub_host_type_tag(sub_host_type):
- """Generate the host type tag
- """
- return "oo_subhosttype_%s" % sub_host_type
-
- # This function uses all of these params to perform a filters on our host inventory.
- # pylint: disable=too-many-arguments
- def get_host_list(self, clusters=None, host_type=None, sub_host_type=None, envs=None, version=None, cached=False):
- """Get the list of hosts from the inventory using host-type and environment
- """
- retval = set([])
- envs = envs or []
-
- inv = self.get_inventory(cached=cached)
-
- retval.update(inv.get('all_hosts', []))
-
- if clusters:
- cluster_hosts = set([])
- if len(clusters) > 1:
- for cluster in clusters:
- clu_tag = AwsUtil.gen_clusterid_tag(cluster)
- cluster_hosts.update(inv.get(clu_tag, []))
- else:
- cluster_hosts.update(inv.get(AwsUtil.gen_clusterid_tag(clusters[0]), []))
-
- retval.intersection_update(cluster_hosts)
-
- if envs:
- env_hosts = set([])
- if len(envs) > 1:
- for env in envs:
- env_tag = AwsUtil.gen_env_tag(env)
- env_hosts.update(inv.get(env_tag, []))
- else:
- env_hosts.update(inv.get(AwsUtil.gen_env_tag(envs[0]), []))
-
- retval.intersection_update(env_hosts)
-
- if host_type:
- retval.intersection_update(inv.get(self.gen_host_type_tag(host_type, version), []))
-
- if sub_host_type:
- retval.intersection_update(inv.get(self.gen_sub_host_type_tag(sub_host_type), []))
-
- if version != 'all':
- retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), []))
-
- return list(retval)
-
- def convert_to_ip(self, hosts, cached=False):
- """convert a list of host names to ip addresses"""
-
- inv = self.get_inventory(cached=cached)
- ips = []
- for host in hosts:
- ips.append(inv['_meta']['hostvars'][host]['oo_public_ip'])
-
- return ips
diff --git a/bin/openshift_ansible/multi_inventory.py b/bin/openshift_ansible/multi_inventory.py
deleted file mode 120000
index b40feec07..000000000
--- a/bin/openshift_ansible/multi_inventory.py
+++ /dev/null
@@ -1 +0,0 @@
-../../inventory/multi_inventory.py
\ No newline at end of file
diff --git a/bin/openshift_ansible/utils.py b/bin/openshift_ansible/utils.py
deleted file mode 100644
index e6243aa5a..000000000
--- a/bin/openshift_ansible/utils.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-''' The purpose of this module is to contain small utility functions.
-'''
-
-import re
-
-def normalize_dnsname(name, padding=10):
- ''' The purpose of this function is to return a dns name with zero padding,
- so that it sorts properly (as a human would expect).
-
- Example: name=ex-lrg-node10.prod.rhcloud.com
- Returns: ex-lrg-node0000000010.prod.rhcloud.com
-
- Example Usage:
- sorted(['a3.example.com', 'a10.example.com', 'a1.example.com'],
- key=normalize_dnsname)
-
- Returns: ['a1.example.com', 'a3.example.com', 'a10.example.com']
- '''
- parts = re.split(r'(\d+)', name)
- retval = []
- for part in parts:
- if re.match(r'^\d+$', part):
- retval.append(part.zfill(padding))
- else:
- retval.append(part)
-
- return ''.join(retval)
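
The removed normalize_dnsname() helper is small enough to restate on its own; the standalone sketch below reproduces its behavior and the sorting example from its docstring (illustrative, not part of the repository).

#!/usr/bin/env python
# Standalone restatement of the removed normalize_dnsname() helper.
import re

def normalize_dnsname(name, padding=10):
    """Zero-pad numeric runs so host names sort the way a human expects."""
    parts = re.split(r'(\d+)', name)
    return ''.join(p.zfill(padding) if p.isdigit() else p for p in parts)

hosts = ['a3.example.com', 'a10.example.com', 'a1.example.com']
print(sorted(hosts, key=normalize_dnsname))
# -> ['a1.example.com', 'a3.example.com', 'a10.example.com']
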
diff --git a/bin/opscp b/bin/opscp
deleted file mode 100755
index 4bfe166f6..000000000
--- a/bin/opscp
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/bin/bash
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-
-function usage() {
- cat << EOF
-Usage: opscp [OPTIONS] local remote
-
-Options:
- --version show program's version number and exit
- --help show this help message and exit
- -l USER, --user=USER username (OPTIONAL)
- -p PAR, --par=PAR max number of parallel threads (OPTIONAL)
- --outdir=OUTDIR output directory for stdout files (OPTIONAL)
- --errdir=ERRDIR output directory for stderr files (OPTIONAL)
- -c CLUSTER, --cluster CLUSTER
- which cluster to use
- -e ENV, --env ENV which environment to use
- --v3 When working with v3 environments. v2 by default
- -t HOST_TYPE, --host-type HOST_TYPE
- which host type to use
- --list-host-types list all of the host types
- --timeout=TIMEOUT timeout (secs) (0 = no timeout) per host (OPTIONAL)
- -O OPTION, --option=OPTION
- SSH option (OPTIONAL)
- -v, --verbose turn on warning and diagnostic messages (OPTIONAL)
- -A, --askpass Ask for a password (OPTIONAL)
- -x ARGS, --extra-args=ARGS
- Extra command-line arguments, with processing for
- spaces, quotes, and backslashes
- -X ARG, --extra-arg=ARG
- Extra command-line argument
- -r, --recursive recusively copy directories (OPTIONAL)
-
-Example: opscp -t ex-srv -e stg -l irb2 foo.txt /home/irb2/foo.txt
-
-EOF
-}
-
-if [ $# -eq 0 ] || [ "$1" == "--help" ]
-then
- usage
- exit 1
-fi
-
-# See if ohi is installed
-if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
-
- exit 10
-fi
-
-PAR=200
-USER=root
-TIMEOUT=0
-ENV=""
-HOST_TYPE=""
-
-while [ $# -gt 0 ] ; do
- case $1 in
- -t|--host-type)
- shift # get past the option
- HOST_TYPE=$1
- shift # get past the value of the option
- ;;
-
- -c)
- shift # get past the option
- CLUSTER=$1
- shift # get past the value of the option
- ;;
-
- -e)
- shift # get past the option
- ENV=$1
- shift # get past the value of the option
- ;;
-
- --v3)
- OPENSHIFT_VERSION="--v3 --ip"
- shift # get past the value of the option
- ;;
-
- --timeout)
- shift # get past the option
- TIMEOUT=$1
- shift # get past the value of the option
- ;;
-
- -p|--par)
- shift # get past the option
- PAR=$1
- shift # get past the value of the option
- ;;
-
- -l|--user)
- shift # get past the option
- USER=$1
- shift # get past the value of the option
- ;;
-
- --list-host-types)
- ohi --list-host-types
- exit 0
- ;;
-
- -h|--hosts|-H|--host|-o)
- echo "ERROR: unknown option $1"
- exit 20
- ;;
-
- *)
- args+=("$1")
- shift
- ;;
- esac
-done
-
-# Get host list from ohi
-CMD=""
-if [ -n "$CLUSTER" ] ; then
- CMD="$CMD -c $CLUSTER"
-fi
-
-if [ -n "$ENV" ] ; then
- CMD="$CMD -e $ENV"
-fi
-
-if [ -n "$HOST_TYPE" ] ; then
- CMD="$CMD -t $HOST_TYPE"
-fi
-
-if [ -n "$OPENSHIFT_VERSION" ] ; then
- CMD="$CMD $OPENSHIFT_VERSION"
-fi
-
-if [ -n "$CMD" ] ; then
- HOSTS="$(ohi $CMD 2>/dev/null)"
- OHI_ECODE=$?
-fi
-
-if [ $OHI_ECODE -ne 0 ] ; then
- echo
- echo "ERROR: ohi failed with exit code $OHI_ECODE"
- echo
- echo "This is usually caused by a bad value passed for host-type or environment."
- echo
- exit 25
-fi
-
-exec pscp.pssh -t $TIMEOUT -p $PAR -l $USER -h <(echo "$HOSTS") "${args[@]}"
diff --git a/bin/opssh b/bin/opssh
deleted file mode 100755
index 0113e7216..000000000
--- a/bin/opssh
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/bin/bash
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-
-function usage() {
- cat << EOF
-Usage: opssh [OPTIONS] command [...]
-
-Options:
- --version show program's version number and exit
- --help show this help message and exit
- -l USER, --user=USER username (OPTIONAL)
- -p PAR, --par=PAR max number of parallel threads (OPTIONAL)
- --outdir=OUTDIR output directory for stdout files (OPTIONAL)
- --errdir=ERRDIR output directory for stderr files (OPTIONAL)
- -c CLUSTER, --cluster CLUSTER
- which cluster to use
- -e ENV, --env ENV which environment to use
- --v3 When working with v3 environments. v2 by default
- -t HOST_TYPE, --host-type HOST_TYPE
- which host type to use
- --list-host-types list all of the host types
- --timeout=TIMEOUT timeout (secs) (0 = no timeout) per host (OPTIONAL)
- -O OPTION, --option=OPTION
- SSH option (OPTIONAL)
- -v, --verbose turn on warning and diagnostic messages (OPTIONAL)
- -A, --askpass Ask for a password (OPTIONAL)
- -x ARGS, --extra-args=ARGS
- Extra command-line arguments, with processing for
- spaces, quotes, and backslashes
- -X ARG, --extra-arg=ARG
- Extra command-line argument
- -i, --inline inline aggregated output and error for each server
- --inline-stdout inline standard output for each server
- -I, --send-input read from standard input and send as input to ssh
- -P, --print print output as we get it
-
-Example: opssh -t ex-srv -e stg -l irb2 --outdir /tmp/foo uptime
-
-EOF
-}
-
-if [ $# -eq 0 ] || [ "$1" == "--help" ]
-then
- usage
- exit 1
-fi
-
-# See if ohi is installed
-if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
-
- exit 10
-fi
-
-PAR=200
-USER=root
-TIMEOUT=0
-ENV=""
-HOST_TYPE=""
-
-while [ $# -gt 0 ] ; do
- case $1 in
- -t|--host-type)
- shift # get past the option
- HOST_TYPE=$1
- shift # get past the value of the option
- ;;
-
- -c)
- shift # get past the option
- CLUSTER=$1
- shift # get past the value of the option
- ;;
-
- -e)
- shift # get past the option
- ENV=$1
- shift # get past the value of the option
- ;;
-
- --v3)
- OPENSHIFT_VERSION="--v3 --ip"
- shift # get past the value of the option
- ;;
-
- --timeout)
- shift # get past the option
- TIMEOUT=$1
- shift # get past the value of the option
- ;;
-
- -p|--par)
- shift # get past the option
- PAR=$1
- shift # get past the value of the option
- ;;
-
- -l|--user)
- shift # get past the option
- USER=$1
- shift # get past the value of the option
- ;;
-
- --list-host-types)
- ohi --list-host-types
- exit 0
- ;;
-
- -h|--hosts|-H|--host|-o)
- echo "ERROR: unknown option $1"
- exit 20
- ;;
-
- *)
- args+=("$1")
- shift
- ;;
- esac
-done
-
-# Get host list from ohi
-CMD=""
-if [ -n "$CLUSTER" ] ; then
- CMD="$CMD -c $CLUSTER"
-fi
-
-if [ -n "$ENV" ] ; then
- CMD="$CMD -e $ENV"
-fi
-
-if [ -n "$HOST_TYPE" ] ; then
- CMD="$CMD -t $HOST_TYPE"
-fi
-
-if [ -n "$OPENSHIFT_VERSION" ] ; then
- CMD="$CMD $OPENSHIFT_VERSION"
-fi
-
-if [ -n "$CMD" ] ; then
- HOSTS="$(ohi $CMD 2>/dev/null)"
- OHI_ECODE=$?
-fi
-
-if [ $OHI_ECODE -ne 0 ] ; then
- echo
- echo "ERROR: ohi failed with exit code $OHI_ECODE"
- echo
- echo "This is usually caused by a bad value passed for host-type or environment."
- echo
- exit 25
-fi
-
-exec pssh -t $TIMEOUT -p $PAR -l $USER -h <(echo "$HOSTS") "${args[@]}"
diff --git a/bin/oscp b/bin/oscp
deleted file mode 100755
index 4d3286ed8..000000000
--- a/bin/oscp
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import traceback
-import sys
-import os
-import re
-import ConfigParser
-
-from openshift_ansible import awsutil
-
-CONFIG_MAIN_SECTION = 'main'
-
-class Oscp(object):
- def __init__(self):
- self.host = None
- self.user = ''
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.parse_cli_args()
- self.parse_config_file()
-
- # parse host and user
- self.process_host()
-
- self.aws = awsutil.AwsUtil()
-
- # get a dict of host inventory
- if self.args.refresh_cache:
- self.get_hosts(True)
- else:
- self.get_hosts()
-
- if (self.args.src == '' or self.args.dest == '') and not self.args.list:
- self.parser.print_help()
- return
-
- if self.args.debug:
- print self.host
- print self.args
-
- # perform the scp
- if self.args.list:
- self.list_hosts()
- else:
- self.scp()
-
- def parse_config_file(self):
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- def parse_cli_args(self):
- parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
- parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
- parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
- parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
- parser.add_argument('-r', '--recurse', action='store_true', default=False,
- help='Recursively copy files to or from destination.')
- parser.add_argument('-o', '--ssh_opts', action='store',
- help='options to pass to SSH.\n \
- "-oPort=22,TCPKeepAlive=yes"')
-
- parser.add_argument('src', nargs='?', default='')
- parser.add_argument('dest',nargs='?', default='')
-
- self.args = parser.parse_args()
- self.parser = parser
-
-
- def process_host(self):
- '''Determine host name and user name for SSH.
- '''
- # is the first param passed a valid file?
- if os.path.isfile(self.args.src) or os.path.isdir(self.args.src):
- self.local_src = True
- self.host = self.args.dest
- else:
- self.local_src = False
- self.host = self.args.src
-
- if '@' in self.host:
- re_host = re.compile("(.*@)(.*)(:.*$)")
- else:
- re_host = re.compile("(.*)(:.*$)")
-
- search = re_host.search(self.host)
-
- if search:
- if len(search.groups()) > 2:
- self.user = search.groups()[0]
- self.host = search.groups()[1]
- self.path = search.groups()[2]
- else:
- self.host = search.groups()[0]
- self.path = search.groups()[1]
-
- def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and return a dict where the format '''
- if refresh_cache:
- self.host_inventory = self.aws.get_inventory(['--refresh-cache'])['_meta']['hostvars']
- else:
- self.host_inventory = self.aws.get_inventory()['_meta']['hostvars']
-
- def select_host(self):
- '''select host attempts to match the host specified
- on the command line with a list of hosts.
- '''
- results = None
- if self.host_inventory.has_key(self.host):
- results = (self.host, self.host_inventory[self.host])
- else:
- print "Could not find specified host: %s." % self.host
-
- # default - no results found.
- return results
-
- def list_hosts(self, limit=None):
- '''Function to print out the host inventory.
-
- Takes a single parameter to limit the number of hosts printed.
- '''
- for host_id, server_info in self.host_inventory.items():
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
-
- def scp(self):
- '''scp files to or from a specified host
- '''
- try:
- # shell args start with the program name in position 1
- scp_args = ['/usr/bin/scp']
-
- if self.args.verbose:
- scp_args.append('-v')
-
- if self.args.recurse:
- scp_args.append('-r')
-
- if self.args.ssh_opts:
- for arg in self.args.ssh_opts.split(","):
- scp_args.append("-o%s" % arg)
-
- results = self.select_host()
-
- if self.args.debug: print results
-
- if not results:
- return # early exit, no results
-
- # Assume we have one and only one.
- server_info = results[1]
-
- host_str = "%s%s%s" % (self.user, server_info['oo_public_ip'], self.path)
-
- if self.local_src:
- scp_args.append(self.args.src)
- scp_args.append(host_str)
- else:
- scp_args.append(host_str)
- scp_args.append(self.args.dest)
-
- print "Running: %s\n" % ' '.join(scp_args)
-
- os.execve('/usr/bin/scp', scp_args, os.environ)
- except:
- print traceback.print_exc()
- print sys.exc_info()
-
-
-if __name__ == '__main__':
- oscp = Oscp()
-
diff --git a/bin/ossh b/bin/ossh
deleted file mode 100755
index 0dd2fb741..000000000
--- a/bin/ossh
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import traceback
-import sys
-import os
-import re
-import ConfigParser
-
-from openshift_ansible import awsutil
-
-CONFIG_MAIN_SECTION = 'main'
-
-class Ossh(object):
- def __init__(self):
- self.user = None
- self.host = None
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.parse_cli_args()
- self.parse_config_file()
-
- self.aws = awsutil.AwsUtil()
-
- if self.args.refresh_cache:
- self.get_hosts(True)
- else:
- self.get_hosts()
-
- # parse host and user
- self.process_host()
-
- if self.args.host == '' and not self.args.list:
- self.parser.print_help()
- return
-
- if self.args.debug:
- print self.args
-
- # perform the SSH
- if self.args.list:
- self.list_hosts()
- else:
- self.ssh()
-
- def parse_config_file(self):
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- def parse_cli_args(self):
- parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
- parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
- parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
- parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
- parser.add_argument('-c', '--command', action='store',
- help='Command to run on remote host')
- parser.add_argument('-l', '--login_name', action='store',
- help='User in which to ssh as')
-
- parser.add_argument('-o', '--ssh_opts', action='store',
- help='options to pass to SSH.\n \
- "-oForwardX11=yes,TCPKeepAlive=yes"')
- parser.add_argument('-A', default=False, action="store_true",
- help='Forward authentication agent')
- parser.add_argument('host', nargs='?', default='')
-
- self.args = parser.parse_args()
- self.parser = parser
-
-
- def process_host(self):
- '''Determine host name and user name for SSH.
- '''
-
- parts = self.args.host.split('@')
-
- # parse username if passed
- if len(parts) > 1:
- self.user = parts[0]
- self.host = parts[1]
- else:
- self.host = parts[0]
-
- if self.args.login_name:
- self.user = self.args.login_name
-
-
- def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and return a dict where the format '''
- if refresh_cache:
- self.host_inventory = self.aws.get_inventory(['--refresh-cache'])['_meta']['hostvars']
- else:
- self.host_inventory = self.aws.get_inventory()['_meta']['hostvars']
-
- def select_host(self):
- '''select host attempts to match the host specified
- on the command line with a list of hosts.
- '''
- results = None
- if self.host_inventory.has_key(self.host):
- results = (self.host, self.host_inventory[self.host])
- else:
- print "Could not find specified host: %s." % self.host
-
- # default - no results found.
- return results
-
- def list_hosts(self, limit=None):
- '''Function to print out the host inventory.
-
- Takes a single parameter to limit the number of hosts printed.
- '''
- for host_id, server_info in self.host_inventory.items():
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
-
- def ssh(self):
- '''SSH to a specified host
- '''
- try:
- # shell args start with the program name in position 1
- ssh_args = ['/usr/bin/ssh']
-
- if self.user:
- ssh_args.append('-l%s' % self.user)
-
- if self.args.A:
- ssh_args.append('-A')
-
- if self.args.verbose:
- ssh_args.append('-vvv')
-
- if self.args.ssh_opts:
- for arg in self.args.ssh_opts.split(","):
- ssh_args.append("-o%s" % arg)
-
- results = self.select_host()
- if not results:
- return # early exit, no results
-
- # Assume we have one and only one.
- server_info = results[1]
-
- ssh_args.append(server_info['oo_public_ip'])
-
- #last argument
- if self.args.command:
- ssh_args.append("%s" % self.args.command)
-
- print "Running: %s\n" % ' '.join(ssh_args)
-
- os.execve('/usr/bin/ssh', ssh_args, os.environ)
- except:
- print traceback.print_exc()
- print sys.exc_info()
-
-
-if __name__ == '__main__':
- ossh = Ossh()
-
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
deleted file mode 100755
index dcbde3e51..000000000
--- a/bin/ossh_bash_completion
+++ /dev/null
@@ -1,52 +0,0 @@
-__ossh_known_hosts(){
- if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
-
- elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
-
- elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
-
- fi
-}
-
-_ossh()
-{
- local cur prev known_hosts
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
- known_hosts="$(__ossh_known_hosts)"
- COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
-
- return 0
-}
-complete -F _ossh ossh oscp
-
-__opssh_known_hosts(){
- if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
-
- elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
-
- elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
-
- fi
-}
-
-_opssh()
-{
- local cur prev known_hosts
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
- known_hosts="$(__opssh_known_hosts)"
- COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
-
- return 0
-}
-complete -F _opssh opssh
-
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
deleted file mode 100644
index 94ea61dab..000000000
--- a/bin/ossh_zsh_completion
+++ /dev/null
@@ -1,31 +0,0 @@
-#compdef ossh oscp
-
-_ossh_known_hosts(){
- if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join([name for name in z["_meta"]["hostvars"].keys()])')
-
- elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys() ])')
-
- elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys() ])')
-
- fi
-
-}
-_ossh(){
- local curcontext="$curcontext" state line
- typeset -A opt_args
-
-#_arguments "*:Hosts:_ossh_known_hosts"
- _arguments -s : \
- "*:hosts:->hosts"
-
- case "$state" in
- hosts)
- _values 'hosts' $(_ossh_known_hosts)
- ;;
- esac
-
-}
-_ossh "$@"
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
deleted file mode 100644
index 65979c58a..000000000
--- a/bin/zsh_functions/_ossh
+++ /dev/null
@@ -1,49 +0,0 @@
-#compdef ossh oscp
-
-_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
- fi
-}
-
-_ossh(){
- local curcontext="$curcontext" state line
- typeset -A opt_args
-
- common_arguments=(
- '(- *)'{-h,--help}'[show help]' \
- {-v,--verbose}'[enable verbose]' \
- {-d,--debug}'[debug mode]' \
- {-l,--login_name}+'[login name]:login_name' \
- {-c,--command}+'[command to run on remote host]:command' \
- {-o,--ssh_opts}+'[SSH Options to pass to SSH]:ssh options' \
- {-e,--env}+'[environtment to use]:environment:->env' \
- '--list[list out hosts]' \
- ':OP Hosts:->oo_hosts'
- )
-
- case "$service" in
- ossh)
- _arguments -C -s \
- "$common_arguments[@]" \
- ;;
-
- oscp)
- _arguments -C -s \
- "$common_arguments[@]" \
- {-r,--recurse}'[Recursive copy]' \
- ':file:_files'
- ;;
- esac
-
- case "$state" in
- oo_hosts)
- _values 'oo_hosts' $(_ossh_known_hosts)
- ;;
- env)
- _values 'environment' ops int stg prod
- ;;
- esac
-}
-
-_ossh "$@"
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 554ef440f..260dea92c 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -306,7 +306,7 @@ class FilterModule(object):
return string.split(separator)
@staticmethod
- def oo_haproxy_backend_masters(hosts):
+ def oo_haproxy_backend_masters(hosts, port):
""" This takes an array of dicts and returns an array of dicts
to be used as a backend for the haproxy role
"""
@@ -314,8 +314,7 @@ class FilterModule(object):
for idx, host_info in enumerate(hosts):
server = dict(name="master%s" % idx)
server_ip = host_info['openshift']['common']['ip']
- server_port = host_info['openshift']['master']['api_port']
- server['address'] = "%s:%s" % (server_ip, server_port)
+ server['address'] = "%s:%s" % (server_ip, port)
server['opts'] = 'check'
servers.append(server)
return servers
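
After this change the filter takes the API port as an explicit argument instead of reading each host's openshift.master.api_port fact. A standalone sketch of the updated filter, with made-up host facts for illustration:

# Sketch of oo_haproxy_backend_masters after this change; the host facts
# below are made up for illustration.
def oo_haproxy_backend_masters(hosts, port):
    servers = []
    for idx, host_info in enumerate(hosts):
        server = dict(name="master%s" % idx)
        server_ip = host_info['openshift']['common']['ip']
        server['address'] = "%s:%s" % (server_ip, port)
        server['opts'] = 'check'
        servers.append(server)
    return servers

masters = [{'openshift': {'common': {'ip': '10.0.0.10'}}},
           {'openshift': {'common': {'ip': '10.0.0.11'}}}]
print(oo_haproxy_backend_masters(masters, 8443))
# -> [{'name': 'master0', 'address': '10.0.0.10:8443', 'opts': 'check'}, ...]
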
diff --git a/git/pylint.sh b/git/pylint.sh
index f29c055dc..3acf9cc8c 100755
--- a/git/pylint.sh
+++ b/git/pylint.sh
@@ -7,6 +7,7 @@ ANSIBLE_UPSTREAM_FILES=(
'inventory/libvirt/hosts/libvirt_generic.py'
'inventory/openstack/hosts/nova.py'
'lookup_plugins/sequence.py'
+ 'playbooks/gce/openshift-cluster/library/gce.py'
)
OLDREV=$1
diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example
index 5376dd353..defd53d43 100644
--- a/inventory/byo/hosts.aep.example
+++ b/inventory/byo/hosts.aep.example
@@ -409,6 +409,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
# host group for masters
[masters]
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index cfbb7ff8c..b153e73fd 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -414,6 +414,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 2a8d4c02b..d0f5c4c52 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -410,6 +410,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index c3102a9d9..6f9083154 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.93
+Version: 3.0.94
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -205,6 +205,9 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu May 26 2016 Scott Dodson <sdodson@redhat.com> 3.0.94-1
+- Use grep to decide when to add our comment (sdodson@redhat.com)
+
* Tue May 24 2016 Troy Dawson <tdawson@redhat.com> 3.0.93-1
- Fixup spec file (tdawson@redhat.com)
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 8402b3579..4839c100b 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,7 +1,20 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../aws/openshift-cluster/vars.yml
- - ../../aws/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
g_sudo: "{{ deployment_vars[deployment_type].become }}"
@@ -21,3 +34,4 @@
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
+ openshift_use_dnsmasq: false
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 7d5776ae6..d22c86cda 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -150,6 +150,7 @@
groups: "{{ instance_groups }}"
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
+ ec2_tag_sub-host-type: "{{ sub_host_type }}"
openshift_node_labels: "{{ node_label }}"
logrotate_scripts: "{{ logrotate }}"
with_together:
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
index 2a3974a8c..b1087f9c4 100644
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -3,8 +3,10 @@
mounts:
- [ xvdb ]
- [ ephemeral0 ]
+{% endif %}
write_files:
+{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
- content: |
DEVS=/dev/xvdb
VG=docker_vg
@@ -12,8 +14,7 @@ write_files:
owner: root:root
permissions: '0644'
{% endif %}
-
-{% if deployment_vars[deployment_type].become %}
+{% if deployment_vars[deployment_type].become | bool %}
- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
permissions: 440
content: |
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index bd31c42dd..d762203b2 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,12 +1,25 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- name: Update - Populate oo_hosts_to_update group
hosts: localhost
connection: local
become: no
gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
tasks:
- name: Update - Evaluate oo_hosts_to_update
add_host:
@@ -14,7 +27,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index 8bda72ac2..d774187f0 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -17,7 +17,7 @@ deployment_rhel7_ent_base:
deployment_vars:
origin:
# centos-7, requires marketplace
- image: "{{ lookup('oo_option', 'ec2_image') | default('ami-61bbf104', True) }}"
+ image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}"
image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
ssh_user: centos
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index b2f09d58d..5cf5df08e 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -33,6 +33,8 @@
- include: ../openshift-nfs/config.yml
+- include: ../openshift-loadbalancer/config.yml
+
- include: ../openshift-master/config.yml
- include: additional_config.yml
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 1474bb3ca..0a37d4597 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -1,4 +1,6 @@
---
+- include: evaluate_groups.yml
+
- hosts: oo_hosts_to_update
vars:
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
new file mode 100644
index 000000000..f4392173a
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -0,0 +1,5 @@
+---
+- name: Configure load balancers
+ hosts: oo_lb_to_config
+ roles:
+ - role: openshift_loadbalancer
diff --git a/playbooks/common/openshift-loadbalancer/filter_plugins b/playbooks/common/openshift-loadbalancer/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/lookup_plugins b/playbooks/common/openshift-loadbalancer/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/roles b/playbooks/common/openshift-loadbalancer/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
new file mode 100644
index 000000000..19fffd5e9
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/service.yml
@@ -0,0 +1,20 @@
+---
+- name: Populate g_service_nodes host group if needed
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - fail: msg="new_cluster_state is required to be injected in this playbook"
+ when: new_cluster_state is not defined
+
+ - name: Evaluate g_service_lb
+ add_host: name={{ item }} groups=g_service_lb
+ with_items: oo_host_group_exp | default([])
+
+- name: Change state on lb instance(s)
+ hosts: g_service_lb
+ connection: ssh
+ gather_facts: no
+ tasks:
+  - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index c39af9c40..c6fac2870 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -156,127 +156,6 @@
- master.etcd-ca.crt
when: etcd_client_certs_missing is defined and etcd_client_certs_missing
-# Must be run before generating master certs which involved openshift_cli role and needs
-# to pull down the correct docker container:
-- name: Determine openshift_version to configure on first master
- hosts: oo_first_master
- any_errors_fatal: true
- # We do initial seeding of openshift_version if possible with these tasks. The openshift_docker role
- # will make absolutely sure it's set to a specific version after this.
- pre_tasks:
- - debug: var=openshift.common
- - debug: var=openshift_image_tag
- - debug: var=openshift_release
- - debug: var=openshift_version
- roles:
- - openshift_docker
-
-- name: Determine if master certificates need to be generated
- hosts: oo_first_master:oo_masters_to_config
- tasks:
- - set_fact:
- openshift_master_certs_no_etcd:
- - admin.crt
- - master.kubelet-client.crt
- - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
- - master.server.crt
- - openshift-master.crt
- - openshift-registry.crt
- - openshift-router.crt
- - etcd.server.crt
- openshift_master_certs_etcd:
- - master.etcd-client.crt
-
- - set_fact:
- openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
-
- - name: Check status of master certificates
- stat:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- with_items: "{{ openshift_master_certs }}"
- register: g_master_cert_stat_result
- - set_fact:
- master_certs_missing: "{{ False in (g_master_cert_stat_result.results
- | oo_collect(attribute='stat.exists')
- | list ) }}"
- master_cert_subdir: master-{{ openshift.common.hostname }}
- master_cert_config_dir: "{{ openshift.common.config_base }}/master"
- - set_fact:
- openshift_infra_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_nodes_with_label('region', 'infra')
- | oo_collect('inventory_hostname') }}"
- when: openshift_infra_nodes is not defined and groups.oo_nodes_to_config | default([]) | length > 0
-
-- name: Configure master certificates
- hosts: oo_first_master
- vars:
- master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
- masters_needing_certs: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
- | oo_filter_list(filter_attr='master_certs_missing') }}"
- master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- roles:
- - openshift_master_certificates
- post_tasks:
- - name: Remove generated etcd client certs when using external etcd
- file:
- path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
- state: absent
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- with_nested:
- - "{{ masters_needing_certs | default([]) }}"
- - - master.etcd-client.crt
- - master.etcd-client.key
-
- - name: Create a tarball of the master certs
- command: >
- tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
- -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
- args:
- creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- with_items: "{{ masters_needing_certs | default([]) }}"
-
- - name: Retrieve the master cert tarball from the master
- fetch:
- src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: "{{ masters_needing_certs | default([]) }}"
-
-- name: Configure load balancers
- hosts: oo_lb_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- haproxy_limit_nofile: 100000
- haproxy_global_maxconn: 20000
- haproxy_default_maxconn: 20000
- haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
- haproxy_frontends:
- - name: atomic-openshift-api
- mode: tcp
- options:
- - tcplog
- binds:
- - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
- default_backend: atomic-openshift-api
- haproxy_backends:
- - name: atomic-openshift-api
- mode: tcp
- option: tcplog
- balance: source
- servers: "{{ hostvars | oo_select_keys(groups['oo_masters']) | oo_haproxy_backend_masters }}"
- roles:
- - role: openshift_facts
- - role: haproxy
- when: hostvars[groups.oo_first_master.0].openshift.master.ha | bool
-
- name: Check for cached session secrets
hosts: oo_first_master
roles:
@@ -377,19 +256,17 @@
}}"
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
- pre_tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ openshift.common.config_base }}/master"
- state: directory
- when: master_certs_missing | bool and 'oo_first_master' not in group_names
- - name: Unarchive the tarball on the master
- unarchive:
- src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
- dest: "{{ master_cert_config_dir }}"
- when: master_certs_missing | bool and 'oo_first_master' not in group_names
roles:
- - openshift_master
+ - role: openshift_master
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_hostnames: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'] | default([]))
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
- role: nickhammond.logrotate
- role: nuage_master
when: openshift.common.use_nuage | bool
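For reference, the new role parameters passed to openshift_master above resolve to plain per-host values; a hedged sketch of the result for a one-master, one-etcd cluster (hostnames are hypothetical):

    # Hypothetical resolved values (sketch only)
    openshift_ca_host: master1.example.com
    openshift_master_etcd_hosts:
      - etcd1.example.com
    openshift_master_hostnames:
      - master1.example.com
      - master1
      - 192.0.2.10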
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 5e92b5cbd..9c9aa779a 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -19,23 +19,6 @@
labels: "{{ openshift_node_labels | default(None) }}"
annotations: "{{ openshift_node_annotations | default(None) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- - name: Check status of node certificates
- stat:
- path: "{{ openshift.common.config_base }}/node/{{ item }}"
- with_items:
- - "system:node:{{ openshift.common.hostname }}.crt"
- - "system:node:{{ openshift.common.hostname }}.key"
- - "system:node:{{ openshift.common.hostname }}.kubeconfig"
- - ca.crt
- - server.key
- - server.crt
- register: stat_result
- - set_fact:
- certs_missing: "{{ stat_result.results | oo_collect(attribute='stat.exists')
- | list | intersect([false])}}"
- node_subdir: node-{{ openshift.common.hostname }}
- config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
- node_cert_dir: "{{ openshift.common.config_base }}/node"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -48,53 +31,6 @@
register: mktemp
changed_when: False
-- name: Create node certificates
- hosts: oo_first_master
- vars:
- nodes_needing_certs: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']
- | default([]))
- | oo_filter_list(filter_attr='certs_missing') }}"
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- roles:
- - openshift_node_certificates
- post_tasks:
- - name: Create a tarball of the node config directories
- command: >
- tar -czvf {{ item.config_dir }}.tgz
- --transform 's|system:{{ item.node_subdir }}|node|'
- -C {{ item.config_dir }} .
- args:
- creates: "{{ item.config_dir }}.tgz"
- with_items: "{{ nodes_needing_certs | default([]) }}"
-
- - name: Retrieve the node config tarballs from the master
- fetch:
- src: "{{ item.config_dir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: "{{ nodes_needing_certs | default([]) }}"
-
-- name: Deploy node certificates
- hosts: oo_nodes_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ node_cert_dir }}"
- state: directory
- # TODO: notify restart node
- # possibly test service started time against certificate/config file
- # timestamps in node to trigger notify
- - name: Unarchive the tarball on the node
- unarchive:
- src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
- dest: "{{ node_cert_dir }}"
- when: certs_missing
-
- name: Evaluate node groups
hosts: localhost
become: no
@@ -140,7 +76,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - openshift_node
+ - role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Configure node instances
hosts: oo_nodes_to_config:!oo_containerized_master_nodes
@@ -156,7 +93,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - openshift_node
+ - role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Gather and set facts for flannel certificatess
hosts: oo_nodes_to_config
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 475d29293..b973c513f 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -1,8 +1,23 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../gce/openshift-cluster/vars.yml
- - ../../gce/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
g_sudo: "{{ deployment_vars[deployment_type].become }}"
diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py
new file mode 100644
index 000000000..fcaa3b850
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/library/gce.py
@@ -0,0 +1,543 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: gce
+version_added: "1.4"
+short_description: create or terminate GCE instances
+description:
+ - Creates or terminates Google Compute Engine (GCE) instances. See
+ U(https://cloud.google.com/products/compute-engine) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ image:
+ description:
+ - image string to use for the instance
+ required: false
+ default: "debian-7"
+ instance_names:
+ description:
+ - a comma-separated list of instance names to create or destroy
+ required: false
+ default: null
+ machine_type:
+ description:
+ - machine type to use for the instance; 'n1-standard-1' is used by default
+ required: false
+ default: "n1-standard-1"
+ metadata:
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ required: false
+ default: null
+ service_account_email:
+ version_added: "1.5.1"
+ description:
+ - service account email
+ required: false
+ default: null
+ service_account_permissions:
+ version_added: "2.0"
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ required: false
+ default: null
+ choices: [
+ "bigquery", "cloud-platform", "compute-ro", "compute-rw",
+ "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
+ "monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
+ "storage-rw", "taskqueue", "userinfo-email"
+ ]
+ pem_file:
+ version_added: "1.5.1"
+ description:
+ - path to the pem file associated with the service account email
+ required: false
+ default: null
+ project_id:
+ version_added: "1.5.1"
+ description:
+ - your GCE project ID
+ required: false
+ default: null
+ name:
+ description:
+ - identifier when working with a single instance
+ required: false
+ network:
+ description:
+ - name of the network, 'default' will be used if not specified
+ required: false
+ default: "default"
+ persistent_boot_disk:
+ description:
+ - if set, create the instance with a persistent boot disk
+ required: false
+ default: "false"
+ disks:
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ required: false
+ default: null
+ version_added: "1.7"
+ state:
+ description:
+ - desired state of the resource
+ required: false
+ default: "present"
+ choices: ["active", "present", "absent", "deleted"]
+ tags:
+ description:
+ - a comma-separated list of tags to associate with the instance
+ required: false
+ default: null
+ zone:
+ description:
+ - the GCE zone to use
+ required: true
+ default: "us-central1-a"
+ ip_forward:
+ version_added: "1.9"
+ description:
+ - set to true if the instance can forward ip packets (useful for
+ gateways)
+ required: false
+ default: "false"
+ external_ip:
+ version_added: "1.9"
+ description:
+ - type of external ip, ephemeral by default
+ required: false
+ default: "ephemeral"
+ disk_auto_delete:
+ version_added: "1.9"
+ description:
+ - if set, the boot disk will be removed after instance destruction
+ required: false
+ default: "true"
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3"
+notes:
+ - Either I(name) or I(instance_names) is required.
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+# Basic provisioning example. Create a single Debian 7 instance in the
+# us-central1-a Zone of n1-standard-1 machine type.
+- local_action:
+ module: gce
+ name: test-instance
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-7
+
+# Example using defaults and with metadata to create a single 'foo' instance
+- local_action:
+ module: gce
+ name: foo
+ metadata: '{"db":"postgres", "group":"qa", "id":500}'
+
+
+# Launch instances from a control node, run some tasks on the new instances,
+# and then terminate them
+- name: Create a sandbox instance
+ hosts: localhost
+ vars:
+ names: foo,bar
+ machine_type: n1-standard-1
+ image: debian-6
+ zone: us-central1-a
+ service_account_email: unique-email@developer.gserviceaccount.com
+ pem_file: /path/to/pem_file
+ project_id: project-id
+ tasks:
+ - name: Launch instances
+ local_action: gce instance_names={{names}} machine_type={{machine_type}}
+ image={{image}} zone={{zone}}
+ service_account_email={{ service_account_email }}
+ pem_file={{ pem_file }} project_id={{ project_id }}
+ register: gce
+ - name: Wait for SSH to come up
+ local_action: wait_for host={{item.public_ip}} port=22 delay=10
+ timeout=60 state=started
+ with_items: {{gce.instance_data}}
+
+- name: Configure instance(s)
+ hosts: launched
+ sudo: True
+ roles:
+ - my_awesome_role
+ - my_awesome_tasks
+
+- name: Terminate instances
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate instances that were previously launched
+ local_action:
+ module: gce
+ state: 'absent'
+ instance_names: {{gce.instance_names}}
+
+'''
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+
+def get_instance_info(inst):
+ """Retrieves instance information from an instance object and returns it
+ as a dictionary.
+
+ """
+ metadata = {}
+ if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
+ for md in inst.extra['metadata']['items']:
+ metadata[md['key']] = md['value']
+
+ try:
+ netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ except:
+ netname = None
+ if 'disks' in inst.extra:
+ disk_names = [disk_info['source'].split('/')[-1]
+ for disk_info
+ in sorted(inst.extra['disks'],
+ key=lambda disk_info: disk_info['index'])]
+ else:
+ disk_names = []
+
+ if len(inst.public_ips) == 0:
+ public_ip = None
+ else:
+ public_ip = inst.public_ips[0]
+
+ return({
+ 'image': inst.image is not None and inst.image.split('/')[-1] or None,
+ 'disks': disk_names,
+ 'machine_type': inst.size,
+ 'metadata': metadata,
+ 'name': inst.name,
+ 'network': netname,
+ 'private_ip': inst.private_ips[0],
+ 'public_ip': public_ip,
+ 'status': ('status' in inst.extra) and inst.extra['status'] or None,
+ 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
+ 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
+ })
+
+
+def create_instances(module, gce, instance_names):
+ """Creates new instances. Attributes other than instance_names are picked
+ up from 'module'
+
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ instance_names: python list of instance names to create
+
+ Returns:
+ A list of dictionaries with instance information
+ about the instances that were launched.
+
+ """
+ image = module.params.get('image')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ network = module.params.get('network')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ disks = module.params.get('disks')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ ip_forward = module.params.get('ip_forward')
+ external_ip = module.params.get('external_ip')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ service_account_permissions = module.params.get('service_account_permissions')
+ service_account_email = module.params.get('service_account_email')
+
+ if external_ip == "none":
+ external_ip = None
+
+ new_instances = []
+ changed = False
+
+ lc_image = gce.ex_get_image(image)
+ lc_disks = []
+ disk_modes = []
+ for i, disk in enumerate(disks or []):
+ if isinstance(disk, dict):
+ lc_disks.append(gce.ex_get_volume(disk['name']))
+ disk_modes.append(disk['mode'])
+ else:
+ lc_disks.append(gce.ex_get_volume(disk))
+ # boot disk is implicitly READ_WRITE
+ disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
+ lc_network = gce.ex_get_network(network)
+ lc_machine_type = gce.ex_get_size(machine_type)
+ lc_zone = gce.ex_get_zone(zone)
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP.keys():
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ if service_account_email:
+ ex_sa_perms.append({'email': service_account_email})
+ else:
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+
+ # These variables all have default values but check just in case
+ if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
+ module.fail_json(msg='Missing required create instance variable',
+ changed=False)
+
+ for name in instance_names:
+ pd = None
+ if lc_disks:
+ pd = lc_disks[0]
+ elif persistent_boot_disk:
+ try:
+ pd = gce.create_volume(None, "%s" % name, image=lc_image)
+ except ResourceExistsError:
+ pd = gce.ex_get_volume("%s" % name, lc_zone)
+ inst = None
+ try:
+ inst = gce.create_node(
+ name, lc_machine_type, lc_image, location=lc_zone,
+ ex_network=network, ex_tags=tags, ex_metadata=metadata,
+ ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
+ external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
+ ex_service_accounts=ex_sa_perms
+ )
+ changed = True
+ except ResourceExistsError:
+ inst = gce.ex_get_node(name, lc_zone)
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to create ' +
+ 'instance %s, error: %s' % (name, e.value))
+
+ for i, lc_disk in enumerate(lc_disks):
+ # Check whether the disk is already attached
+ if (len(inst.extra['disks']) > i):
+ attached_disk = inst.extra['disks'][i]
+ if attached_disk['source'] != lc_disk.extra['selfLink']:
+ module.fail_json(
+ msg=("Disk at index %d does not match: requested=%s found=%s" % (
+ i, lc_disk.extra['selfLink'], attached_disk['source'])))
+ elif attached_disk['mode'] != disk_modes[i]:
+ module.fail_json(
+ msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
+ i, disk_modes[i], attached_disk['mode'])))
+ else:
+ continue
+ gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
+ # Work around libcloud bug: attached volumes don't get added
+ # to the instance metadata. get_instance_info() only cares about
+ # source and index.
+ if len(inst.extra['disks']) != i+1:
+ inst.extra['disks'].append(
+ {'source': lc_disk.extra['selfLink'], 'index': i})
+
+ if inst:
+ new_instances.append(inst)
+
+ instance_names = []
+ instance_json_data = []
+ for inst in new_instances:
+ d = get_instance_info(inst)
+ instance_names.append(d['name'])
+ instance_json_data.append(d)
+
+ return (changed, instance_json_data, instance_names)
+
+
+def terminate_instances(module, gce, instance_names, zone_name):
+ """Terminates a list of instances.
+
+ module: Ansible module object
+ gce: authenticated GCE connection object
+ instance_names: a list of instance names to terminate
+ zone_name: the zone where the instances reside prior to termination
+
+ Returns a dictionary of instance names that were terminated.
+
+ """
+ changed = False
+ terminated_instance_names = []
+ for name in instance_names:
+ inst = None
+ try:
+ inst = gce.ex_get_node(name, zone_name)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if inst:
+ gce.destroy_node(inst)
+ terminated_instance_names.append(inst.name)
+ changed = True
+
+ return (changed, terminated_instance_names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(default='debian-7'),
+ instance_names=dict(),
+ machine_type=dict(default='n1-standard-1'),
+ metadata=dict(),
+ name=dict(),
+ network=dict(default='default'),
+ persistent_boot_disk=dict(type='bool', default=False),
+ disks=dict(type='list'),
+ state=dict(choices=['active', 'present', 'absent', 'deleted'],
+ default='present'),
+ tags=dict(type='list'),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ project_id=dict(),
+ ip_forward=dict(type='bool', default=False),
+ external_ip=dict(choices=['ephemeral', 'none'],
+ default='ephemeral'),
+ disk_auto_delete=dict(type='bool', default=True),
+ )
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
+
+ gce = gce_connect(module)
+
+ image = module.params.get('image')
+ instance_names = module.params.get('instance_names')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ name = module.params.get('name')
+ network = module.params.get('network')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ ip_forward = module.params.get('ip_forward')
+ changed = False
+
+ inames = []
+ if isinstance(instance_names, list):
+ inames = instance_names
+ elif isinstance(instance_names, str):
+ inames = instance_names.split(',')
+ if name:
+ inames.append(name)
+ if not inames:
+ module.fail_json(msg='Must specify a "name" or "instance_names"',
+ changed=False)
+ if not zone:
+ module.fail_json(msg='Must specify a "zone"', changed=False)
+
+ json_output = {'zone': zone}
+ if state in ['absent', 'deleted']:
+ json_output['state'] = 'absent'
+ (changed, terminated_instance_names) = terminate_instances(
+ module, gce, inames, zone)
+
+ # based on what user specified, return the same variable, although
+ # value could be different if an instance could not be destroyed
+ if instance_names:
+ json_output['instance_names'] = terminated_instance_names
+ elif name:
+ json_output['name'] = name
+
+ elif state in ['active', 'present']:
+ json_output['state'] = 'present'
+ (changed, instance_data, instance_name_list) = create_instances(
+ module, gce, inames)
+ json_output['instance_data'] = instance_data
+ if instance_names:
+ json_output['instance_names'] = instance_name_list
+ elif name:
+ json_output['name'] = name
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.gce import *
+if __name__ == '__main__':
+ main()
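One more hedged usage sketch for the bundled module, using YAML-style parameters rather than the key=value form in its EXAMPLES section (all values are placeholders):

    - name: Launch a throwaway instance with the vendored gce module
      hosts: localhost
      connection: local
      gather_facts: no
      tasks:
        - gce:
            name: oo-scratch-node
            zone: us-central1-a
            machine_type: n1-standard-1
            image: debian-7
            service_account_email: svc-account@example-project.iam.gserviceaccount.com
            pem_file: /path/to/service-account.pem
            project_id: example-project
          register: gce_out
        # instance_data is returned when state is present/active
        - debug:
            var: gce_out.instance_data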
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index e3efd8566..c5c479052 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -17,6 +17,11 @@
- clusterid-{{ cluster_id }}
- host-type-{{ type }}
- sub-host-type-{{ g_sub_host_type }}
+ metadata:
+ startup-script: |
+ #!/bin/bash
+ echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}
+
when: instances |length > 0
register: gce
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 9b7a2777a..332f27da7 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -1,12 +1,25 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- name: Populate oo_hosts_to_update group
hosts: localhost
connection: local
become: no
gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
tasks:
- name: Evaluate oo_hosts_to_update
add_host:
@@ -14,7 +27,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 81a6fff0d..032d4cf68 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -2,10 +2,23 @@
# TODO: need to figure out a plan for setting hostname, currently the default
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../libvirt/openshift-cluster/vars.yml
- - ../../libvirt/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
g_sudo: "{{ deployment_vars[deployment_type].become }}"
@@ -21,3 +34,4 @@
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
+ openshift_use_dnsmasq: false
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 9b7a2777a..28362c984 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- name: Populate oo_hosts_to_update group
hosts: localhost
connection: local
@@ -14,7 +30,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 9c0ca9af9..6e4f414d6 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,8 +1,21 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../openstack/openshift-cluster/vars.yml
- - ../../openstack/openshift-cluster/cluster_hosts.yml
vars:
g_nodeonmaster: true
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 2f05c3adc..1d54a9c39 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -591,11 +591,17 @@ resources:
type: OS::Heat::MultipartMime
properties:
parts:
- - config: { get_file: user-data }
- config:
str_replace:
template: |
#cloud-config
+ disable_root: true
+
+ system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
write_files:
- path: /etc/sudoers.d/00-openshift-no-requiretty
permissions: 440
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 3d4fe42d0..6429a6755 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -46,7 +46,7 @@
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
-P infra_flavor={{ openstack_flavor["infra"] }}
- -P dns_flavor=m1.small
+ -P dns_flavor={{ openstack_flavor["dns"] }}
openshift-ansible-{{ cluster_id }}-stack'
- name: Wait for OpenStack Stack readiness
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 539af6524..6d4d23963 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: dns.yml
- name: Populate oo_hosts_to_update group
@@ -6,9 +22,6 @@
connection: local
become: no
gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
tasks:
- name: Evaluate oo_hosts_to_update
add_host:
@@ -16,7 +29,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 84cba0506..bc53a51b0 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -13,6 +13,7 @@ openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_k
openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
openstack_flavor:
+ dns: "{{ lookup('oo_option', 'dns_flavor' ) | default('m1.small', True) }}"
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
deleted file mode 100644
index 837fa67db..000000000
--- a/roles/haproxy/tasks/main.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: Install haproxy
- action: "{{ ansible_pkg_mgr }} name=haproxy state=present"
- when: not openshift.common.is_containerized | bool
-
-- name: Configure systemd service directory for haproxy
- file:
- path: /etc/systemd/system/haproxy.service.d
- state: directory
- when: haproxy_limit_nofile is defined
-
-- name: Configure the nofile limits for haproxy
- ini_file:
- dest: /etc/systemd/system/haproxy.service.d/limits.conf
- section: Service
- option: LimitNOFILE
- value: "{{ haproxy_limit_nofile }}"
- when: haproxy_limit_nofile is defined
- notify: restart haproxy
- register: nofile_limit_result
-
-- name: Reload systemd if needed
- command: systemctl daemon-reload
- when: nofile_limit_result | changed
-
-- name: Configure haproxy
- template:
- src: haproxy.cfg.j2
- dest: /etc/haproxy/haproxy.cfg
- owner: root
- group: root
- mode: 0644
- notify: restart haproxy
-
-- name: Enable and start haproxy
- service:
- name: haproxy
- state: started
- enabled: yes
- register: start_result
-
-- set_fact:
- haproxy_start_result_changed: "{{ start_result | changed }}"
diff --git a/roles/openshift_ansible_inventory/README.md b/roles/openshift_ansible_inventory/README.md
deleted file mode 100644
index b62287c12..000000000
--- a/roles/openshift_ansible_inventory/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-OpenShift Ansible Inventory
-=========
-
-Install and configure openshift-ansible-inventory.
-
-Requirements
-------------
-
-None
-
-Role Variables
---------------
-
-oo_inventory_group
-oo_inventory_user
-oo_inventory_accounts
-oo_inventory_cache_max_age
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/openshift_ansible_inventory/defaults/main.yml b/roles/openshift_ansible_inventory/defaults/main.yml
deleted file mode 100644
index f53c00c80..000000000
--- a/roles/openshift_ansible_inventory/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-oo_inventory_group: root
-oo_inventory_owner: root
-oo_inventory_cache_max_age: 1800
diff --git a/roles/openshift_ansible_inventory/handlers/main.yml b/roles/openshift_ansible_inventory/handlers/main.yml
deleted file mode 100644
index e2db43477..000000000
--- a/roles/openshift_ansible_inventory/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for openshift_ansible_inventory
diff --git a/roles/openshift_ansible_inventory/meta/main.yml b/roles/openshift_ansible_inventory/meta/main.yml
deleted file mode 100644
index 7f7387e80..000000000
--- a/roles/openshift_ansible_inventory/meta/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Install and configure openshift-ansible-inventory
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
-dependencies: []
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
deleted file mode 100644
index 05c7a5f93..000000000
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- action: "{{ ansible_pkg_mgr }} name={{ item}} state=present"
- with_items:
- - openshift-ansible-inventory
- - openshift-ansible-inventory-aws
- - openshift-ansible-inventory-gce
- when: not openshift.common.is_containerized | bool
-
-- name:
- copy:
- content: "{{ oo_inventory_accounts | to_nice_yaml }}"
- dest: /etc/ansible/multi_inventory.yaml
- group: "{{ oo_inventory_group }}"
- owner: "{{ oo_inventory_owner }}"
- mode: "0640"
-
-- file:
- state: directory
- dest: /etc/ansible/inventory
- owner: root
- group: libra_ops
- mode: 0750
-
-- file:
- state: link
- src: /usr/share/ansible/inventory/multi_inventory.py
- dest: /etc/ansible/inventory/multi_inventory.py
- owner: root
- group: libra_ops
-
-# This cron uses the above location to call its job
-- name: Cron to keep cache fresh
- cron:
- name: 'multi_inventory'
- minute: '*/10'
- job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
- when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
-
-- name: Set cache location
- file:
- state: directory
- dest: "{{ oo_inventory_cache_location | dirname }}"
- owner: root
- group: libra_ops
- recurse: yes
- mode: '2770'
- when: oo_inventory_cache_location is defined
diff --git a/roles/openshift_ansible_inventory/vars/main.yml b/roles/openshift_ansible_inventory/vars/main.yml
deleted file mode 100644
index 25c049282..000000000
--- a/roles/openshift_ansible_inventory/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for openshift_ansible_inventory
diff --git a/roles/openshift_ca/README.md b/roles/openshift_ca/README.md
new file mode 100644
index 000000000..96c9cd5f2
--- /dev/null
+++ b/roles/openshift_ca/README.md
@@ -0,0 +1,48 @@
+OpenShift CA
+============
+
+This role delegates all of its tasks to the `openshift_ca_host` so that other OpenShift certificate roles can depend on it.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | Description |
+|-------------------------|-----------------------------------------------|-----------------------------------------------------------------------------|
+| openshift_ca_host | None (Required) | The hostname of the system where the OpenShift CA will be created. |
+| openshift_ca_config_dir | `{{ openshift.common.config_base }}/master` | CA certificate directory. |
+| openshift_ca_cert | `{{ openshift_ca_config_dir }}/ca.crt` | CA certificate path including CA certificate filename. |
+| openshift_ca_key | `{{ openshift_ca_config_dir }}/ca.key` | CA key path including CA key filename. |
+| openshift_ca_serial | `{{ openshift_ca_config_dir }}/ca.serial.txt` | CA serial path including CA serial filename. |
+| openshift_version | `{{ openshift_pkg_version }}` | OpenShift package version. |
+
+Dependencies
+------------
+
+* openshift_repos
+* openshift_cli
+
+Example Playbook
+----------------
+
+```
+- name: Create OpenShift CA
+ hosts: localhost
+ roles:
+ - role: openshift_ca
+ openshift_ca_host: master1.example.com
+```
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index b5dd466c9..0089f4209 100644
--- a/roles/openshift_master_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -1,10 +1,10 @@
---
galaxy_info:
author: Jason DeTiberus
- description:
+ description: OpenShift CA
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.8
+ min_ansible_version: 1.9.4
platforms:
- name: EL
versions:
@@ -13,5 +13,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_repos }
-- { role: openshift_cli }
+- role: openshift_repos
+- role: openshift_cli
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
new file mode 100644
index 000000000..497473f22
--- /dev/null
+++ b/roles/openshift_ca/tasks/main.yml
@@ -0,0 +1,56 @@
+---
+- fail:
+ msg: "openshift_ca_host variable must be defined for this role"
+ when: openshift_ca_host is not defined
+
+- name: Install the base package for admin tooling
+ action: >
+ {{ ansible_pkg_mgr }}
+ name={{ openshift.common.service_type }}{{ openshift_version }}
+ state=present
+ when: not openshift.common.is_containerized | bool
+ register: install_result
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Reload generated facts
+ openshift_facts:
+ when: install_result | changed
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Create openshift_ca_config_dir if it does not exist
+ file:
+ path: "{{ openshift_ca_config_dir }}"
+ state: directory
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Determine if CA must be created
+ stat:
+ path: "{{ openshift_ca_config_dir }}/{{ item }}"
+ register: g_master_ca_stat_result
+ with_items:
+ - ca.crt
+ - ca.key
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- set_fact:
+ master_ca_missing: "{{ False in (g_master_ca_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list) }}"
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Create the master certificates if they do not already exist
+ command: >
+ {{ openshift.common.admin_binary }} create-master-certs
+ --hostnames={{ openshift_master_hostnames | join(',') }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --cert-dir={{ openshift_ca_config_dir }}
+ --overwrite=false
+ when: hostvars[openshift_ca_host].master_ca_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
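Every task above delegates to openshift_ca_host, so any host's play can list the role as long as that variable (and openshift_master_hostnames, used by create-master-certs) is supplied. A minimal hedged wiring sketch, following the oo_* group conventions used in the playbooks earlier in this diff:

    - name: Ensure the cluster CA exists
      hosts: oo_first_master
      roles:
        - role: openshift_ca
          openshift_ca_host: "{{ groups.oo_first_master.0 }}"
          openshift_master_hostnames: "{{ hostvars
                                          | oo_select_keys(groups['oo_masters_to_config'] | default([]))
                                          | oo_collect('openshift.common.all_hostnames')
                                          | oo_flatten | unique }}"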
diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml
new file mode 100644
index 000000000..a32e385ec
--- /dev/null
+++ b/roles/openshift_ca/vars/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
+openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
+openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_clock/meta/main.yml b/roles/openshift_clock/meta/main.yml
new file mode 100644
index 000000000..3e175beb0
--- /dev/null
+++ b/roles/openshift_clock/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jeremiah Stuever
+ description: OpenShift Clock
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/openshift_clock/tasks/main.yaml b/roles/openshift_clock/tasks/main.yaml
new file mode 100644
index 000000000..5a8403f68
--- /dev/null
+++ b/roles/openshift_clock/tasks/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Set clock facts
+ openshift_facts:
+ role: clock
+ local_facts:
+ enabled: "{{ openshift_clock_enabled | default(None) }}"
+
+- name: Install ntp package
+ action: "{{ ansible_pkg_mgr }} name=ntp state=present"
+ when: openshift.clock.enabled | bool and not openshift.clock.chrony_installed | bool
+
+- name: Start and enable ntpd/chronyd
+ shell: timedatectl set-ntp true
+ when: openshift.clock.enabled | bool
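The only external knob here is openshift_clock_enabled, which feeds the clock fact; a hedged inventory sketch for opting out of clock management on a group of hosts:

    # group_vars sketch (hypothetical): skip ntp/chrony handling entirely
    openshift_clock_enabled: false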
diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml
index 7cc548f69..de36b201b 100644
--- a/roles/openshift_etcd/meta/main.yml
+++ b/roles/openshift_etcd/meta/main.yml
@@ -13,6 +13,7 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_etcd_facts
+- role: openshift_clock
- role: openshift_docker
when: openshift.common.is_containerized | bool
- role: etcd
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index eee4d2b8e..4e7785cd7 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1549,11 +1549,13 @@ class OpenShiftFacts(object):
OpenShiftFactsUnsupportedRoleError:
"""
known_roles = ['builddefaults',
+ 'clock',
'cloudprovider',
'common',
'docker',
'etcd',
'hosted',
+ 'loadbalancer',
'master',
'node']
@@ -1719,6 +1721,16 @@ class OpenShiftFacts(object):
docker['version'] = version_info['version']
defaults['docker'] = docker
+ if 'clock' in roles:
+ exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
+ if exit_code == 0:
+ chrony_installed = True
+ else:
+ chrony_installed = False
+ defaults['clock'] = dict(
+ enabled=True,
+ chrony_installed=chrony_installed)
+
if 'cloudprovider' in roles:
defaults['cloudprovider'] = dict(kind=None)
@@ -1763,6 +1775,13 @@ class OpenShiftFacts(object):
router=dict()
)
+ if 'loadbalancer' in roles:
+ loadbalancer = dict(frontend_port='8443',
+ default_maxconn='20000',
+ global_maxconn='20000',
+ limit_nofile='100000')
+ defaults['loadbalancer'] = loadbalancer
+
return defaults
def guess_host_provider(self):
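The two new fact branches above produce defaults that end up under the openshift fact; a sketch of the resulting shape (values mirror the Python defaults, chrony_installed depends on the rpm check):

    # Sketch of the resulting facts
    openshift:
      clock:
        enabled: true
        chrony_installed: false   # true when `rpm -q chrony` succeeds
      loadbalancer:
        frontend_port: "8443"
        default_maxconn: "20000"
        global_maxconn: "20000"
        limit_nofile: "100000"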
diff --git a/roles/haproxy/README.md b/roles/openshift_loadbalancer/README.md
index 5bc415066..81fc282be 100644
--- a/roles/haproxy/README.md
+++ b/roles/openshift_loadbalancer/README.md
@@ -1,5 +1,5 @@
-HAProxy
-=======
+OpenShift HAProxy Loadbalancer
+==============================
TODO
diff --git a/roles/haproxy/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index a1524cfe1..a1524cfe1 100644
--- a/roles/haproxy/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
diff --git a/roles/haproxy/handlers/main.yml b/roles/openshift_loadbalancer/handlers/main.yml
index 5b8691b26..5b8691b26 100644
--- a/roles/haproxy/handlers/main.yml
+++ b/roles/openshift_loadbalancer/handlers/main.yml
diff --git a/roles/haproxy/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml
index 0fad106a9..fe336acf7 100644
--- a/roles/haproxy/meta/main.yml
+++ b/roles/openshift_loadbalancer/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: Jason DeTiberus
- description: HAProxy
+ description: OpenShift haproxy loadbalancer
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 1.9
@@ -10,5 +10,6 @@ galaxy_info:
versions:
- 7
dependencies:
-- { role: os_firewall }
-- { role: openshift_repos }
+- role: openshift_facts
+- role: os_firewall
+- role: openshift_repos
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
new file mode 100644
index 000000000..5514aa70b
--- /dev/null
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -0,0 +1,73 @@
+---
+- name: Set haproxy frontend port
+ openshift_facts:
+ role: loadbalancer
+ local_facts:
+ frontend_port: "{{ openshift_master_api_port | default(None) }}"
+
+- name: Set loadbalancer facts
+ openshift_facts:
+ role: loadbalancer
+ local_facts:
+ limit_nofile: "{{ openshift_loadbalancer_limit_nofile | default(None) }}"
+ default_maxconn: "{{ openshift_loadbalancer_default_maxconn | default(None) }}"
+ global_maxconn: "{{ openshift_loadbalancer_global_maxconn | default(None) }}"
+ frontends:
+ - name: atomic-openshift-api
+ mode: tcp
+ options:
+ - tcplog
+ binds:
+ - "*:{{ openshift.loadbalancer.frontend_port }}"
+ default_backend: atomic-openshift-api
+ backends:
+ - name: atomic-openshift-api
+ mode: tcp
+ option: tcplog
+ balance: source
+ servers: "{{ hostvars
+ | oo_select_keys(groups['oo_masters'])
+ | oo_haproxy_backend_masters(openshift.loadbalancer.frontend_port) }}"
+
+- name: Install haproxy
+ action: "{{ ansible_pkg_mgr }} name=haproxy state=present"
+ when: not openshift.common.is_containerized | bool
+
+- name: Configure systemd service directory for haproxy
+ file:
+ path: /etc/systemd/system/haproxy.service.d
+ state: directory
+ when: "'limit_nofile' in openshift.loadbalancer"
+
+- name: Configure the nofile limits for haproxy
+ ini_file:
+ dest: /etc/systemd/system/haproxy.service.d/limits.conf
+ section: Service
+ option: LimitNOFILE
+ value: "{{ openshift.loadbalancer.limit_nofile }}"
+ when: "'limit_nofile' in openshift.loadbalancer"
+ notify: restart haproxy
+ register: nofile_limit_result
+
+- name: Reload systemd if needed
+ command: systemctl daemon-reload
+ when: nofile_limit_result | changed
+
+- name: Configure haproxy
+ template:
+ src: haproxy.cfg.j2
+ dest: /etc/haproxy/haproxy.cfg
+ owner: root
+ group: root
+ mode: 0644
+ notify: restart haproxy
+
+- name: Enable and start haproxy
+ service:
+ name: haproxy
+ state: started
+ enabled: yes
+ register: start_result
+
+- set_fact:
+ haproxy_start_result_changed: "{{ start_result | changed }}"
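The new role reads its tunables from openshift_loadbalancer_* inventory variables (falling back to the fact defaults added in openshift_facts.py above); a hedged inventory sketch with example values:

    # inventory vars sketch (values are examples)
    openshift_master_api_port: 8443
    openshift_loadbalancer_limit_nofile: 150000
    openshift_loadbalancer_default_maxconn: 30000
    openshift_loadbalancer_global_maxconn: 30000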
diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
index cb4380971..05e360d3b 100644
--- a/roles/haproxy/templates/haproxy.cfg.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
@@ -3,7 +3,7 @@
global
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
- maxconn {{ haproxy_global_maxconn | default('4000') }}
+ maxconn {{ openshift.loadbalancer.global_maxconn }}
user haproxy
group haproxy
daemon
@@ -31,14 +31,14 @@ defaults
timeout server 300s
timeout http-keep-alive 10s
timeout check 10s
- maxconn {{ haproxy_default_maxconn | default('3000') }}
+ maxconn {{ openshift.loadbalancer.default_maxconn }}
listen stats :9000
mode http
stats enable
stats uri /
-{% for frontend in haproxy_frontends %}
+{% for frontend in openshift.loadbalancer.frontends %}
frontend {{ frontend.name }}
{% for bind in frontend.binds %}
bind {{ bind }}
@@ -59,7 +59,7 @@ frontend {{ frontend.name }}
{% endif %}
{% endfor %}
-{% for backend in haproxy_backends %}
+{% for backend in openshift.loadbalancer.backends %}
backend {{ backend.name }}
balance {{ backend.balance }}
{% if 'mode' in backend %}
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index d8834d27f..f6b926d74 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -12,8 +12,10 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_clock
- role: openshift_docker
- role: openshift_cli
+- role: openshift_master_certificates
- role: openshift_cloud_provider
- role: openshift_builddefaults
- role: openshift_master_facts
diff --git a/roles/openshift_master_ca/README.md b/roles/openshift_master_ca/README.md
deleted file mode 100644
index 5b2d3601b..000000000
--- a/roles/openshift_master_ca/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Master CA
-========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_certificates/README.md b/roles/openshift_master_certificates/README.md
index ba3d5f28c..a80d47040 100644
--- a/roles/openshift_master_certificates/README.md
+++ b/roles/openshift_master_certificates/README.md
@@ -1,27 +1,44 @@
OpenShift Master Certificates
========================
-TODO
+This role determines whether OpenShift master certificates must be created, delegates certificate creation to the `openshift_ca_host`, and then deploys those certificates to the master hosts to which this role is applied. If this role is applied to the `openshift_ca_host`, certificate deployment is skipped.
Requirements
------------
-TODO
-
Role Variables
--------------
-TODO
+From `openshift_ca`:
+
+| Name | Default value | Description |
+|---------------------------------------|---------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| openshift_ca_host | None (Required) | The hostname of the system where the OpenShift CA will be (or has been) created. |
+
+From this role:
+
+| Name | Default value | Description |
+|---------------------------------------|---------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| openshift_generated_configs_dir | `{{ openshift.common.config_base }}/generated-configs` | Directory in which per-master generated config directories will be created on the `openshift_ca_host`. |
+| openshift_master_cert_subdir | `master-{{ openshift.common.hostname }}` | Directory within `openshift_generated_configs_dir` where per-master configurations will be placed on the `openshift_ca_host`. |
+| openshift_master_config_dir | `{{ openshift.common.config_base }}/master` | Master configuration directory in which certificates will be deployed on masters. |
+| openshift_master_generated_config_dir | `{{ openshift_generated_configs_dir }}/{{ openshift_master_cert_subdir }}` | Full path to the per-master generated config directory. |
Dependencies
------------
-TODO
+* openshift_ca
Example Playbook
----------------
-TODO
+```
+- name: Create OpenShift Master Certificates
+ hosts: masters
+ roles:
+ - role: openshift_master_certificates
+ openshift_ca_host: master1.example.com
+```
License
-------
diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml
index fd7b73b0f..90fc0fb10 100644
--- a/roles/openshift_master_certificates/meta/main.yml
+++ b/roles/openshift_master_certificates/meta/main.yml
@@ -1,10 +1,10 @@
---
galaxy_info:
author: Jason DeTiberus
- description:
+ description: OpenShift Master Certificates
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.8
+ min_ansible_version: 1.9.4
platforms:
- name: EL
versions:
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_master_ca }
+- role: openshift_ca
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 394f9d381..dd105652b 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -1,38 +1,121 @@
---
+- set_fact:
+ openshift_master_certs_no_etcd:
+ - admin.crt
+ - master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
+ - master.server.crt
+ - openshift-master.crt
+ - openshift-registry.crt
+ - openshift-router.crt
+ - etcd.server.crt
+ openshift_master_certs_etcd:
+ - master.etcd-client.crt
+
+- set_fact:
+ openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd )) if openshift_master_etcd_hosts | length > 0 else openshift_master_certs_no_etcd }}"
+
+- name: Check status of master certificates
+ stat:
+ path: "{{ openshift_master_config_dir }}/{{ item }}"
+ with_items:
+ - "{{ openshift_master_certs }}"
+ register: g_master_cert_stat_result
+
+- set_fact:
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list) }}"
+
- name: Ensure the generated_configs directory present
file:
- path: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}"
+ path: "{{ openshift_master_generated_config_dir }}"
state: directory
mode: 0700
- with_items: "{{ masters_needing_certs | default([]) }}"
+ when: master_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- file:
- src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
- dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ src: "{{ openshift_master_config_dir }}/{{ item }}"
+ dest: "{{ openshift_master_generated_config_dir }}/{{ item }}"
state: hard
- with_nested:
- - "{{ masters_needing_certs | default([]) }}"
- -
- - ca.crt
- - ca.key
- - ca.serial.txt
+ with_items:
+ - ca.crt
+ - ca.key
+ - ca.serial.txt
+ when: master_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- name: Create the master certificates if they do not already exist
command: >
{{ openshift.common.admin_binary }} create-master-certs
- --hostnames={{ item.openshift.common.all_hostnames | join(',') }}
- --master={{ item.openshift.master.api_url }}
- --public-master={{ item.openshift.master.public_api_url }}
- --cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}
+ --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --cert-dir={{ openshift_master_generated_config_dir }}
--overwrite=false
- when: item.master_certs_missing | bool
- with_items: "{{ masters_needing_certs | default([]) }}"
+ when: master_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- file:
- src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
- dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ src: "{{ openshift_master_config_dir }}/{{ item }}"
+ dest: "{{ openshift_master_generated_config_dir }}/{{ item }}"
state: hard
force: true
- with_nested:
- - "{{ masters_needing_certs | default([]) }}"
+ with_items:
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}"
+ when: master_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Remove generated etcd client certs when using external etcd
+ file:
+ path: "{{ openshift_master_generated_config_dir }}/{{ item }}"
+ state: absent
+ when: openshift_master_etcd_hosts | length > 0
+ with_items:
+ - master.etcd-client.crt
+ - master.etcd-client.key
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_master_mktemp
+ changed_when: False
+ when: master_certs_missing | bool
+ delegate_to: localhost
+
+- name: Create a tarball of the master certs
+ command: >
+ tar -czvf {{ openshift_master_generated_config_dir }}.tgz
+ -C {{ openshift_master_generated_config_dir }} .
+ args:
+ creates: "{{ openshift_master_generated_config_dir }}.tgz"
+ when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Retrieve the master cert tarball from the master
+ fetch:
+ src: "{{ openshift_master_generated_config_dir }}.tgz"
+ dest: "{{ g_master_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift_master_config_dir }}"
+ state: directory
+ when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+
+- name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ g_master_mktemp.stdout }}/{{ openshift_master_cert_subdir }}.tgz"
+ dest: "{{ openshift_master_config_dir }}"
+ when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+
+- file: name={{ g_master_mktemp.stdout }} state=absent
+ changed_when: False
+ when: master_certs_missing | bool
+ delegate_to: localhost
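
For orientation, the refactored tasks above replace the old controller-driven `masters_needing_certs` loop with a per-host check: each master stats its own certificates, generation and hard-linking are delegated to `openshift_ca_host`, and the results are synced back through a tarball fetched to the control machine. A minimal, hypothetical play applying the role could look like the sketch below; the host name and the use of the `etcd` group to populate `openshift_master_etcd_hosts` are assumptions for illustration, not taken from this diff.

```
# Illustrative only: host and group names are assumptions.
- name: Create OpenShift master certificates
  hosts: masters
  roles:
  - role: openshift_master_certificates
    openshift_ca_host: master1.example.com
    openshift_master_etcd_hosts: "{{ groups['etcd'] | default([]) }}"
```

The tarball round-trip exists because Ansible cannot copy files directly between two managed hosts: the archive is created on the CA host, fetched into a local temp directory, and unarchived on each master that was missing certificates.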
diff --git a/roles/openshift_master_certificates/vars/main.yml b/roles/openshift_master_certificates/vars/main.yml
index 3f18ddc79..66f2e5162 100644
--- a/roles/openshift_master_certificates/vars/main.yml
+++ b/roles/openshift_master_certificates/vars/main.yml
@@ -1,3 +1,5 @@
---
openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
+openshift_master_cert_subdir: "master-{{ openshift.common.hostname }}"
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_master_generated_config_dir: "{{ openshift_generated_configs_dir }}/{{ openshift_master_cert_subdir }}"
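
For reference, these variables compose into one per-master directory on the CA host. The resolved values below are hypothetical, assuming an `openshift.common.config_base` of `/etc/origin` and a hostname of `master1.example.com`:

```
# Hypothetical resolution (config_base and hostname are assumptions):
# openshift_generated_configs_dir        -> /etc/origin/generated-configs
# openshift_master_cert_subdir           -> master-master1.example.com
# openshift_master_generated_config_dir  -> /etc/origin/generated-configs/master-master1.example.com
```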
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index db1776632..ea52bbb99 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -12,10 +12,11 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_clock
- role: openshift_docker
+- role: openshift_node_certificates
- role: openshift_cloud_provider
- role: openshift_common
- role: openshift_node_dnsmasq
when: openshift.common.use_dnsmasq
- role: os_firewall
-
diff --git a/roles/openshift_node_certificates/README.md b/roles/openshift_node_certificates/README.md
index 6264d253a..f56066b29 100644
--- a/roles/openshift_node_certificates/README.md
+++ b/roles/openshift_node_certificates/README.md
@@ -1,27 +1,44 @@
-OpenShift/Atomic Enterprise Node Certificates
-=============================================
+OpenShift Node Certificates
+===========================
-TODO
+This role determines whether OpenShift node certificates must be created, delegates certificate creation to the `openshift_ca_host`, and then deploys those certificates to the node hosts to which this role is applied.
Requirements
------------
-TODO
-
Role Variables
--------------
-TODO
+From `openshift_ca`:
+
+| Name | Default value | Description |
+|-------------------------------------|-------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|
+| openshift_ca_host | None (Required) | The hostname of the system where the OpenShift CA will be (or has been) created. |
+
+From this role:
+
+| Name | Default value | Description |
+|-------------------------------------|-------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|
+| openshift_generated_configs_dir | `{{ openshift.common.config_base }}/generated-configs` | Directory in which per-node generated config directories will be created on the `openshift_ca_host`. |
+| openshift_node_cert_subdir | `node-{{ openshift.common.hostname }}` | Directory within `openshift_generated_configs_dir` where per-node certificates will be placed on the `openshift_ca_host`. |
+| openshift_node_config_dir | `{{ openshift.common.config_base }}/node` | Node configuration directory in which certificates will be deployed on nodes. |
+| openshift_node_generated_config_dir | `{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }}` | Full path to the per-node generated config directory. |
Dependencies
------------
-TODO
+* openshift_ca
Example Playbook
----------------
-TODO
+```
+- name: Create OpenShift Node Certificates
+ hosts: nodes
+ roles:
+ - role: openshift_node_certificates
+ openshift_ca_host: master1.example.com
+```
License
-------
diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml
index f3236e850..3caa1cdf1 100644
--- a/roles/openshift_node_certificates/meta/main.yml
+++ b/roles/openshift_node_certificates/meta/main.yml
@@ -1,10 +1,10 @@
---
galaxy_info:
author: Jason DeTiberus
- description:
+ description: OpenShift Node Certificates
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.8
+ min_ansible_version: 1.9.4
platforms:
- name: EL
versions:
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_facts }
+- role: openshift_ca
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 216c11093..147a432a4 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -1,36 +1,95 @@
---
-- name: Create openshift_generated_configs_dir if it doesn\'t exist
+- name: Check status of node certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/node/{{ item }}"
+ with_items:
+ - "system:node:{{ openshift.common.hostname }}.crt"
+ - "system:node:{{ openshift.common.hostname }}.key"
+ - "system:node:{{ openshift.common.hostname }}.kubeconfig"
+ - ca.crt
+ - server.key
+ - server.crt
+ register: g_node_cert_stat_result
+
+- set_fact:
+ node_certs_missing: "{{ False in (g_node_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list) }}"
+
+- name: Create openshift_generated_configs_dir if it does not exist
file:
path: "{{ openshift_generated_configs_dir }}"
state: directory
mode: 0700
- when: nodes_needing_certs | length > 0
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- name: Generate the node client config
command: >
{{ openshift.common.admin_binary }} create-api-client-config
- --certificate-authority={{ openshift_master_ca_cert }}
- --client-dir={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}
+ --certificate-authority={{ openshift_ca_cert }}
+ --client-dir={{ openshift_node_generated_config_dir }}
--groups=system:nodes
- --master={{ openshift.master.api_url }}
- --signer-cert={{ openshift_master_ca_cert }}
- --signer-key={{ openshift_master_ca_key }}
- --signer-serial={{ openshift_master_ca_serial }}
- --user=system:node:{{ item.openshift.common.hostname }}
+ --master={{ hostvars[openshift_ca_host].openshift.master.api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:node:{{ openshift.common.hostname }}
args:
- creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
- with_items: "{{ nodes_needing_certs | default([]) }}"
+ creates: "{{ openshift_node_generated_config_dir }}"
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- name: Generate the node server certificate
command: >
{{ openshift.common.admin_binary }} ca create-server-cert
- --cert={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt
- --key={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.key
+ --cert={{ openshift_node_generated_config_dir }}/server.crt
+ --key={{ openshift_node_generated_config_dir }}/server.key
--overwrite=true
- --hostnames={{ item.openshift.common.all_hostnames |join(",") }}
- --signer-cert={{ openshift_master_ca_cert }}
- --signer-key={{ openshift_master_ca_key }}
- --signer-serial={{ openshift_master_ca_serial }}
+ --hostnames={{ openshift.common.all_hostnames |join(",") }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ args:
+ creates: "{{ openshift_node_generated_config_dir }}/server.crt"
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: node_cert_mktemp
+ changed_when: False
+ when: node_certs_missing | bool
+ delegate_to: localhost
+
+- name: Create a tarball of the node config directories
+ command: >
+ tar -czvf {{ openshift_node_generated_config_dir }}.tgz
+ --transform 's|system:{{ openshift_node_cert_subdir }}|node|'
+ -C {{ openshift_node_generated_config_dir }} .
args:
- creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt"
- with_items: "{{ nodes_needing_certs | default([]) }}"
+ creates: "{{ openshift_node_generated_config_dir }}.tgz"
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Retrieve the node config tarballs from the master
+ fetch:
+ src: "{{ openshift_node_generated_config_dir }}.tgz"
+ dest: "{{ node_cert_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift_node_cert_dir }}"
+ state: directory
+ when: node_certs_missing | bool
+
+- name: Unarchive the tarball on the node
+ unarchive:
+ src: "{{ node_cert_mktemp.stdout }}/{{ openshift_node_cert_subdir }}.tgz"
+ dest: "{{ openshift_node_cert_dir }}"
+ when: node_certs_missing | bool
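
The `node_certs_missing` check above uses `oo_collect`, a filter shipped with this repository's filter plugins. Outside the repo, a roughly equivalent check can be written with Jinja2 built-ins; the sketch below assumes the same registered `g_node_cert_stat_result` variable:

```
# Sketch: the same "is any certificate missing?" test using only the
# built-in map filter instead of the repo's oo_collect plugin.
- set_fact:
    node_certs_missing: "{{ False in (g_node_cert_stat_result.results
                            | map(attribute='stat.exists')
                            | list) }}"
```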
diff --git a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml
index 61fbb1e51..2fafc7387 100644
--- a/roles/openshift_node_certificates/vars/main.yml
+++ b/roles/openshift_node_certificates/vars/main.yml
@@ -1,7 +1,6 @@
---
-openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
-openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
-openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
-openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+openshift_node_cert_dir: "{{ openshift.common.config_base }}/node"
+openshift_node_cert_subdir: "node-{{ openshift.common.hostname }}"
+openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
+openshift_node_generated_config_dir: "{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }}"
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 0d7941e4c..51a43d113 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -51,6 +51,9 @@ EOF
done
systemctl restart dnsmasq
- sed -i 's/^nameserver.*$/nameserver '"${def_route_ip}"' # updated by \/etc\/NetworkManager\/dispatcher.d\/99-origin-dns.sh/g' /etc/resolv.conf
+ sed -i 's/^nameserver.*$/nameserver '"${def_route_ip}"'/g' /etc/resolv.conf
+ if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
+ echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> /etc/resolv.conf
+ fi
fi
fi
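
The dispatcher change above keeps `/etc/resolv.conf` idempotent: the `nameserver` line is rewritten without an inline comment, and the marker comment is appended only if it is not already present. Purely as an illustration of that append-once behaviour (the real logic stays in the shell hook), an Ansible sketch might look like:

```
# Illustration only: lineinfile adds the marker line just once, mirroring
# the grep guard in the dispatcher script.
- lineinfile:
    dest: /etc/resolv.conf
    line: "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh"
    insertafter: EOF
```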
diff --git a/test/env-setup b/test/env-setup
deleted file mode 100644
index 7456a641b..000000000
--- a/test/env-setup
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-CUR_PATH=$(pwd)
-
-PREFIX_PYTHONPATH=$CUR_PATH/inventory/:$CUR_PATH/roles/lib_yaml_editor/library
-
-
-export PYTHONPATH=$PREFIX_PYTHONPATH:$PYTHONPATH
diff --git a/test/units/README.md b/test/units/README.md
deleted file mode 100644
index 78a02c3ea..000000000
--- a/test/units/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Location for python unittests.
-
-These should be run by sourcing the env-setup:
-$ source test/env-setup
-
-Then navigate to the test/units/ directory.
-$ python -m unittest multi_inventory_test
diff --git a/test/units/multi_inventory_test.py b/test/units/multi_inventory_test.py
deleted file mode 100755
index 168cd82b7..000000000
--- a/test/units/multi_inventory_test.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for MultiInventory
-'''
-
-import unittest
-import multi_inventory
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name
-class MultiInventoryTest(unittest.TestCase):
- '''
- Test class for multiInventory
- '''
-
-# def setUp(self):
-# '''setup method'''
-# pass
-
- def test_merge_simple_1(self):
- '''Testing a simple merge of 2 dictionaries'''
- a = {"key1" : 1}
- b = {"key1" : 2}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [1, 2]})
-
- def test_merge_b_empty(self):
- '''Testing a merge of an emtpy dictionary'''
- a = {"key1" : 1}
- b = {}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_a_empty(self):
- '''Testing a merge of an emtpy dictionary'''
- b = {"key1" : 1}
- a = {}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_hash_array(self):
- '''Testing a merge of a dictionary and a dictionary with an array'''
- a = {"key1" : {"hasha": 1}}
- b = {"key1" : [1, 2]}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]})
-
- def test_merge_array_hash(self):
- '''Testing a merge of a dictionary with an array and a dictionary with a hash'''
- a = {"key1" : [1, 2]}
- b = {"key1" : {"hasha": 1}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]})
-
- def test_merge_keys_1(self):
- '''Testing a merge on a dictionary for keys'''
- a = {"key1" : [1, 2], "key2" : {"hasha": 2}}
- b = {"key2" : {"hashb": 1}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}})
-
- def test_merge_recursive_1(self):
- '''Testing a recursive merge'''
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
-
- def test_merge_recursive_array_item(self):
- '''Testing a recursive merge for an array'''
- a = {"a" : {"b": {"c": [1]}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
-
- def test_merge_recursive_hash_item(self):
- '''Testing a recursive merge for a hash'''
- a = {"a" : {"b": {"c": {"d": 1}}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
-
- def test_merge_recursive_array_hash(self):
- '''Testing a recursive merge for an array and a hash'''
- a = {"a" : [{"b": {"c": 1}}]}
- b = {"a" : {"b": {"c": 1}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def test_merge_recursive_hash_array(self):
- '''Testing a recursive merge for an array and a hash'''
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : [{"b": {"c": 1}}]}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
-# def tearDown(self):
-# '''TearDown method'''
-# pass
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/test/units/yedit_test.py b/test/units/yedit_test.py
deleted file mode 100755
index 09a65e888..000000000
--- a/test/units/yedit_test.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for yedit
-'''
-
-import unittest
-import os
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name,no-name-in-module
-from yedit import Yedit
-
-class YeditTest(unittest.TestCase):
- '''
- Test class for yedit
- '''
- data = {'a': 'a',
- 'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
- }
-
- filename = 'yedit_test.yml'
-
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- yed = Yedit(YeditTest.filename)
- yed.yaml_dict = YeditTest.data
- yed.write()
-
- def test_load(self):
- ''' Testing a get '''
- yed = Yedit('yedit_test.yml')
- self.assertEqual(yed.yaml_dict, self.data)
-
- def test_write(self):
- ''' Testing a simple write '''
- yed = Yedit('yedit_test.yml')
- yed.put('key1', 1)
- yed.write()
- self.assertTrue(yed.yaml_dict.has_key('key1'))
- self.assertEqual(yed.yaml_dict['key1'], 1)
-
- def test_write_x_y_z(self):
- '''Testing a write of multilayer key'''
- yed = Yedit('yedit_test.yml')
- yed.put('x.y.z', 'modified')
- yed.write()
- yed.load()
- self.assertEqual(yed.get('x.y.z'), 'modified')
-
- def test_delete_a(self):
- '''Testing a simple delete '''
- yed = Yedit('yedit_test.yml')
- yed.delete('a')
- yed.write()
- yed.load()
- self.assertTrue(not yed.yaml_dict.has_key('a'))
-
- def test_delete_b_c(self):
- '''Testing delete of layered key '''
- yed = Yedit('yedit_test.yml')
- yed.delete('b.c')
- yed.write()
- yed.load()
- self.assertTrue(yed.yaml_dict.has_key('b'))
- self.assertFalse(yed.yaml_dict['b'].has_key('c'))
-
- def test_create(self):
- '''Testing a create '''
- os.unlink(YeditTest.filename)
- yed = Yedit('yedit_test.yml')
- yed.create('foo', 'bar')
- yed.write()
- yed.load()
- self.assertTrue(yed.yaml_dict.has_key('foo'))
- self.assertTrue(yed.yaml_dict['foo'] == 'bar')
-
- def test_create_content(self):
- '''Testing a create with content '''
- content = {"foo": "bar"}
- yed = Yedit("yedit_test.yml", content)
- yed.write()
- yed.load()
- self.assertTrue(yed.yaml_dict.has_key('foo'))
- self.assertTrue(yed.yaml_dict['foo'], 'bar')
-
- def test_array_insert(self):
- '''Testing a create with content '''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', 'inject')
- self.assertTrue(yed.get('b.c.d[0]') == 'inject')
-
- def test_array_insert_first_index(self):
- '''Testing a create with content '''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', 'inject')
- self.assertTrue(yed.get('b.c.d[1]') == 'f')
-
- def test_array_insert_second_index(self):
- '''Testing a create with content '''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', 'inject')
- self.assertTrue(yed.get('b.c.d[2]') == 'g')
-
- def test_dict_array_dict_access(self):
- '''Testing a create with content'''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', [{'x': {'y': 'inject'}}])
- self.assertTrue(yed.get('b.c.d[0].[0].x.y') == 'inject')
-
- def test_dict_array_dict_replace(self):
- '''Testing multilevel delete'''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', [{'x': {'y': 'inject'}}])
- yed.put('b.c.d[0].[0].x.y', 'testing')
- self.assertTrue(yed.yaml_dict.has_key('b'))
- self.assertTrue(yed.yaml_dict['b'].has_key('c'))
- self.assertTrue(yed.yaml_dict['b']['c'].has_key('d'))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
- self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x'].has_key('y'))
- self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'], 'testing')
-
- def test_dict_array_dict_remove(self):
- '''Testing multilevel delete'''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', [{'x': {'y': 'inject'}}])
- yed.delete('b.c.d[0].[0].x.y')
- self.assertTrue(yed.yaml_dict.has_key('b'))
- self.assertTrue(yed.yaml_dict['b'].has_key('c'))
- self.assertTrue(yed.yaml_dict['b']['c'].has_key('d'))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
- self.assertFalse(yed.yaml_dict['b']['c']['d'][0][0]['x'].has_key('y'))
-
- def tearDown(self):
- '''TearDown method'''
- os.unlink(YeditTest.filename)
-
-if __name__ == "__main__":
- unittest.main()