-rwxr-xr-x  .papr.sh | 2
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  ansible.cfg | 4
-rw-r--r--  images/installer/root/exports/manifest.json | 2
-rw-r--r--  inventory/.gitignore (renamed from inventory/byo/.gitignore) | 0
-rw-r--r--  inventory/README.md | 6
-rw-r--r--  inventory/hosts.example (renamed from inventory/byo/hosts.example) | 4
-rw-r--r--  inventory/hosts.glusterfs.external.example (renamed from inventory/byo/hosts.byo.glusterfs.external.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.mixed.example (renamed from inventory/byo/hosts.byo.glusterfs.mixed.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.native.example (renamed from inventory/byo/hosts.byo.glusterfs.native.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.registry-only.example (renamed from inventory/byo/hosts.byo.glusterfs.registry-only.example) | 8
-rw-r--r--  inventory/hosts.glusterfs.storage-and-registry.example (renamed from inventory/byo/hosts.byo.glusterfs.storage-and-registry.example) | 8
-rw-r--r--  inventory/hosts.openstack (renamed from inventory/byo/hosts.openstack) | 2
-rw-r--r--  openshift-ansible.spec | 243
-rw-r--r--  playbooks/adhoc/uninstall.yml | 2
-rw-r--r--  playbooks/aws/README.md | 6
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 4
-rw-r--r--  playbooks/aws/provisioning-inventory.example.ini | 1
-rw-r--r--  playbooks/byo/config.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 35
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/init/evaluate_groups.yml | 7
-rw-r--r--  playbooks/openshift-etcd/private/migrate.yml | 4
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_backup.yml | 1
-rw-r--r--  playbooks/openshift-glusterfs/README.md | 3
-rw-r--r--  playbooks/openshift-logging/config.yml | 2
-rw-r--r--  playbooks/openshift-master/private/redeploy-openshift-ca.yml | 2
-rw-r--r--  playbooks/openshift-node/private/additional_config.yml | 1
-rw-r--r--  playbooks/openstack/README.md | 2
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 25
-rw-r--r--  roles/calico/tasks/main.yml | 1
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/flannel/defaults/main.yaml | 6
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 2
-rw-r--r--  roles/lib_utils/library/oo_ec2_group.py | 903
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 41
-rw-r--r--  roles/openshift_aws/filter_plugins/openshift_aws_filters.py | 35
-rw-r--r--  roles/openshift_aws/tasks/accept_nodes.yml | 11
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 31
-rw-r--r--  roles/openshift_aws/tasks/iam_role.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 37
-rw-r--r--  roles/openshift_aws/tasks/launch_config_create.yml | 26
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 19
-rw-r--r--  roles/openshift_aws/tasks/remove_scale_group.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/s3.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 36
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 6
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml | 30
-rw-r--r--  roles/openshift_aws/tasks/security_group_create.yml | 25
-rw-r--r--  roles/openshift_aws/tasks/setup_master_group.yml | 6
-rw-r--r--  roles/openshift_aws/tasks/setup_scale_group_facts.yml | 22
-rw-r--r--  roles/openshift_aws/tasks/upgrade_node_group.yml | 18
-rw-r--r--  roles/openshift_aws/tasks/wait_for_groups.yml | 22
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 6
-rw-r--r--  roles/openshift_ca/meta/main.yml | 1
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 3
-rw-r--r--  roles/openshift_health_checker/HOWTO_CHECKS.md | 2
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 1
-rw-r--r--  roles/openshift_management/README.md | 10
-rw-r--r--  roles/openshift_management/defaults/main.yml | 2
-rw-r--r--  roles/openshift_management/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 18
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_openstack/defaults/main.yml | 3
-rw-r--r--  roles/openshift_openstack/tasks/populate-dns.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 2
-rw-r--r--  roles/openshift_version/meta/main.yml | 1
-rw-r--r--  setup.py | 50
-rw-r--r-- [-rwxr-xr-x]  test/integration/build-images.sh | 0
-rw-r--r-- [-rwxr-xr-x]  test/integration/run-tests.sh | 0
-rw-r--r--  tox.ini | 3
81 files changed, 1476 insertions, 384 deletions
diff --git a/.papr.sh b/.papr.sh
index 80453d4b2..c7b06f059 100755
--- a/.papr.sh
+++ b/.papr.sh
@@ -35,7 +35,7 @@ trap upload_journals ERR
# run the actual installer
# FIXME: override openshift_image_tag defined in the inventory until
# https://github.com/openshift/openshift-ansible/issues/4478 is fixed.
-ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
+ansible-playbook -vvv -i .papr.inventory playbooks/deploy_cluster.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
### DISABLING TESTS FOR NOW, SEE:
### https://github.com/openshift/openshift-ansible/pull/6132
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9db0b5c98..822a6dca2 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.8.0-0.13.0 ./
+3.9.0-0.6.0 ./
diff --git a/ansible.cfg b/ansible.cfg
index 9900d28f8..e4d72553e 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -30,8 +30,8 @@ inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
# work around privilege escalation timeouts in ansible:
timeout = 30
-# Uncomment to use the provided BYO inventory
-#inventory = inventory/byo/hosts.example
+# Uncomment to use the provided example inventory
+#inventory = inventory/hosts.example
[inventory]
# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
diff --git a/images/installer/root/exports/manifest.json b/images/installer/root/exports/manifest.json
index 8b984d7a3..53696b03e 100644
--- a/images/installer/root/exports/manifest.json
+++ b/images/installer/root/exports/manifest.json
@@ -4,7 +4,7 @@
"OPTS": "",
"VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
"VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
- "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
+ "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml",
"HOME_ROOT": "/root",
"ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
"INVENTORY_FILE": "/dev/null"
diff --git a/inventory/byo/.gitignore b/inventory/.gitignore
index 6ff331c7e..6ff331c7e 100644
--- a/inventory/byo/.gitignore
+++ b/inventory/.gitignore
diff --git a/inventory/README.md b/inventory/README.md
index 5e26e3c32..2e348194f 100644
--- a/inventory/README.md
+++ b/inventory/README.md
@@ -1,5 +1 @@
-# OpenShift Ansible inventory config files
-
-You can install OpenShift on:
-
-* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your pre-existing hosts
+# OpenShift Ansible example inventory config files
diff --git a/inventory/byo/hosts.example b/inventory/hosts.example
index e3b56d7a1..c18a53671 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/hosts.example
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
@@ -1047,7 +1047,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
#openshift_management_username: admin
#openshift_management_password: smartvm
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/hosts.glusterfs.external.example
index acf68266e..bf2557cf0 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/hosts.glusterfs.external.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
@@ -44,7 +44,7 @@ node2 openshift_schedulable=True
master
# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
# and "glusterfs_devices" variables defined.
#
# The first variable indicates the hostname of the external GLusterFS node,
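
The renamed GlusterFS example inventory above requires each host in the external glusterfs group to define the "glusterfs_hostname" and "glusterfs_devices" variables. A minimal sketch of such a group entry (hostnames and device paths are illustrative assumptions, not taken from the repository):

    [glusterfs]
    gluster1.example.com glusterfs_hostname=gluster1.example.com glusterfs_devices='[ "/dev/vdb" ]'
    gluster2.example.com glusterfs_hostname=gluster2.example.com glusterfs_devices='[ "/dev/vdb" ]'
    gluster3.example.com glusterfs_hostname=gluster3.example.com glusterfs_devices='[ "/dev/vdb" ]'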
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example
index a559dc377..8a20a037e 100644
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ b/inventory/hosts.glusterfs.mixed.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
@@ -47,7 +47,7 @@ node2 openshift_schedulable=True
master
# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
# and "glusterfs_devices" variables defined.
#
# The first variable indicates the hostname of the external GLusterFS node,
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/hosts.glusterfs.native.example
index ca4765c53..59acf1194 100644
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ b/inventory/hosts.glusterfs.native.example
@@ -1,16 +1,16 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for applications. It
-# will also autmatically create a StorageClass for this purpose.
+# will also automatically create a StorageClass for this purpose.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster.
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example
index 32040f593..6f33e9f6d 100644
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ b/inventory/hosts.glusterfs.registry-only.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for exclusive use
# as storage for a natively hosted Docker registry.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
+# This inventory may also be used with openshift-glusterfs/registry.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example
index 9bd37cbf6..1f3a4282a 100644
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ b/inventory/hosts.glusterfs.storage-and-registry.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for both general
# application use and a natively hosted Docker registry. It will also create a
# StorageClass for the general storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.openstack b/inventory/hosts.openstack
index c648078c4..d928c2b86 100644
--- a/inventory/byo/hosts.openstack
+++ b/inventory/hosts.openstack
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 7d543afdd..9db43e4cc 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.0.0%{?dist}
+Release: 0.6.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -67,7 +67,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
# openshift-ansible-docs install
# Install example inventory into docs/examples
mkdir -p docs/example-inventories
-cp inventory/byo/* docs/example-inventories/
+cp inventory/* docs/example-inventories/
# openshift-ansible-files install
cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
@@ -285,14 +285,151 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.6.0
+- add openshift_master_api_port var to example inventory (jdiaz@redhat.com)
+- Allow 2 sets of hostnames for openstack provider (bdobreli@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.5.0
+- Remove unneeded embedded etcd logic (mgugino@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.4.0
+- Copying upstream fix for ansible 2.4 ec2_group module. (kwoodson@redhat.com)
+- Add missing dependencies on openshift_facts role (sdodson@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.3.0
+- remove integration tests from tox (lmeyer@redhat.com)
+- correct ansible-playbook command syntax (jdiaz@redhat.com)
+- Add openshift_facts to upgrade plays for service_type (mgugino@redhat.com)
+- Check for openshift attribute before using it during CNS install.
+ (jmencak@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.2.0
+- GlusterFS: Add playbook doc note (jarrpa@redhat.com)
+- Fix openshift hosted registry rollout (rteague@redhat.com)
+- Remove container_runtime from the openshift_version (sdodson@redhat.com)
+
+* Fri Dec 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.1.0
+- Cleanup byo references (rteague@redhat.com)
+- openshift_node: reintroduce restart of CRI-O. (gscrivan@redhat.com)
+- container-engine: skip openshift_docker_log_driver when it is False
+ (gscrivan@redhat.com)
+- container-engine: log-opts is a dictionary in the daemon.json file
+ (gscrivan@redhat.com)
+- openshift_version: add dependency to openshift_facts (gscrivan@redhat.com)
+- openshift_version: define openshift_use_crio_only (gscrivan@redhat.com)
+- openshift_version: add dependency to container_runtime (gscrivan@redhat.com)
+- crio: define and use l_is_node_system_container (gscrivan@redhat.com)
+- Update deprecation checks - include: (rteague@redhat.com)
+- Add os_firewall to prerequisites.yml (mgugino@redhat.com)
+- add 3.8 templates for gluster ep and svc (lmeyer@redhat.com)
+- Remove openshift.common.service_type (mgugino@redhat.com)
+- Remove unused openshift_env_structures and openshift_env (mgugino@redhat.com)
+- Fix incorrect register name master registry auth (mgugino@redhat.com)
+- Include Deprecation: Convert to import_playbook (rteague@redhat.com)
+- add 3.8 templates for gluster ep and svc (m.judeikis@gmail.com)
+- Remove all uses of openshift.common.admin_binary (sdodson@redhat.com)
+- Implement container_runtime playbooks and changes (mgugino@redhat.com)
+- Playbook Consolidation - byo/config.yml (rteague@redhat.com)
+- openshift_logging_kibana: fix mixing paren (lmeyer@redhat.com)
+- Fix ami building. (kwoodson@redhat.com)
+- Include Deprecation: Convert to include_tasks (rteague@redhat.com)
+- Add missing symlinks in openshift-logging (rteague@redhat.com)
+- Fix generate_pv_pvcs_list plugin undef (mgugino@redhat.com)
+- Playbook Consolidation - etcd Upgrade (rteague@redhat.com)
+- bug 1519622. Disable rollback of ES DCs (jcantril@redhat.com)
+- Remove all references to pacemaker (pcs, pcsd) and
+ openshift.master.cluster_method. (abutcher@redhat.com)
+- Remove entry point files no longer needed by CI (rteague@redhat.com)
+- Don't check for the deployment_type (tomas@sedovic.cz)
+- Get the correct value out of openshift_release (tomas@sedovic.cz)
+- Fix oreg_auth_credentials_create register var (mgugino@redhat.com)
+- Fix and cleanup not required dns bits (bdobreli@redhat.com)
+- Fix hosted vars (mgugino@redhat.com)
+- Remove duplicate init import in network_manager.yml (rteague@redhat.com)
+- Document testing repos for dev purposes (bdobreli@redhat.com)
+- Remove unused protected_facts_to_overwrite (mgugino@redhat.com)
+- Use openshift testing repos for openstack (bdobreli@redhat.com)
+- Use openshift_release instead of ose_version (tomas@sedovic.cz)
+- Remove the ose_version check (tomas@sedovic.cz)
+- Allow number of retries in openshift_management to be configurable
+ (ealfassa@redhat.com)
+- Bumping to 3.9 (smunilla@redhat.com)
+- Cleanup unused openstack provider code (bdobreli@redhat.com)
+- Adding 3.9 tito releaser (smunilla@redhat.com)
+- Implement container runtime role (mgugino@redhat.com)
+- Fix glusterfs checkpoint info (rteague@redhat.com)
+- storage_glusterfs: fix typo (lmeyer@redhat.com)
+- Playbook Consolidation - Redeploy Certificates (rteague@redhat.com)
+- Fix tox (tomas@sedovic.cz)
+- Remove shell environment lookup (tomas@sedovic.cz)
+- Revert "Fix syntax error caused by an extra paren" (tomas@sedovic.cz)
+- Revert "Fix the env lookup fallback in rhel_subscribe" (tomas@sedovic.cz)
+- Remove reading shell environment in rhel_subscribe (tomas@sedovic.cz)
+- retry package operations (lmeyer@redhat.com)
+- Add v3.9 support (sdodson@redhat.com)
+- Playbook Consolidation - openshift-logging (rteague@redhat.com)
+- Do not escalate privileges in jks generation tasks (iacopo.rozzo@amadeus.com)
+- Fix inventory symlinks in origin-ansible container. (dgoodwin@redhat.com)
+- Initial upgrade for scale groups. (kwoodson@redhat.com)
+- Update the doc text (tomas@sedovic.cz)
+- Optionally subscribe OpenStack RHEL nodes (tomas@sedovic.cz)
+- Fix the env lookup fallback in rhel_subscribe (tomas@sedovic.cz)
+- Fix syntax error caused by an extra paren (tomas@sedovic.cz)
+- Fix no_log warnings for custom module (mgugino@redhat.com)
+- Add external_svc_subnet for k8s loadbalancer type service
+ (jihoon.o@samsung.com)
+- Remove openshift_facts project_cfg_facts (mgugino@redhat.com)
+- Remove dns_port fact (mgugino@redhat.com)
+- Bug 1512793- Fix idempotence issues in ASB deploy (fabian@fabianism.us)
+- Remove unused task file from etcd role (rteague@redhat.com)
+- fix type in authroize (jchaloup@redhat.com)
+- Use IP addresses for OpenStack nodes (tomas@sedovic.cz)
+- Update prometheus to 2.0.0 GA (zgalor@redhat.com)
+- remove schedulable from openshift_facts (mgugino@redhat.com)
+- inventory: Add example for service catalog vars (smilner@redhat.com)
+- Correct usage of include_role (rteague@redhat.com)
+- Remove openshift.common.cli_image (mgugino@redhat.com)
+- Fix openshift_env fact creation within openshift_facts. (abutcher@redhat.com)
+- Combine openshift_node and openshift_node_dnsmasq (mgugino@redhat.com)
+- GlusterFS: Remove extraneous line from glusterblock template
+ (jarrpa@redhat.com)
+- Remove openshift_clock from meta depends (mgugino@redhat.com)
+- Simplify is_master_system_container logic (mgugino@redhat.com)
+- dist.iteritems() no longer exists in Python 3. (jpazdziora@redhat.com)
+- Remove spurrious file committed by error (diego.abelenda@camptocamp.com)
+- Fix name of the service pointed to by hostname
+ (diego.abelenda@camptocamp.com)
+- Missed the default value after the variable name change...
+ (diego.abelenda@camptocamp.com)
+- Change the name of the variable and explicitely document the names
+ (diego.abelenda@camptocamp.com)
+- Allow to set the hostname for routes to prometheus and alertmanager
+ (diego.abelenda@camptocamp.com)
+- Allow openshift_install_examples to be false (michael.fraenkel@gmail.com)
+- Include Deprecation - openshift-service-catalog (rteague@redhat.com)
+- Remove is_openvswitch_system_container from facts (mgugino@redhat.com)
+- Workaround the fact that package state=present with dnf fails for already
+ installed but excluded packages. (jpazdziora@redhat.com)
+- With dnf repoquery and excluded packages, --disableexcludes=all is needed to
+ list the package with --installed. (jpazdziora@redhat.com)
+- Add support for external glusterfs as registry backend (m.judeikis@gmail.com)
+- cri-o: honor additional and insecure registries again (gscrivan@redhat.com)
+- docker: copy Docker metadata to the alternative storage path
+ (gscrivan@redhat.com)
+- Add check for gluterFS DS to stop restarts (m.judeikis@gmail.com)
+- Bug 1514417 - Adding correct advertise-client-urls (shawn.hurley21@gmail.com)
+- Uninstall tuned-profiles-atomic-openshift-node as defined in origin.spec
+ (jmencak@redhat.com)
+- Mod startup script to publish all frontend binds (cwilkers@redhat.com)
+
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.13.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.12.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.11.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.10.0
- tox.ini: simplify unit test reqs (lmeyer@redhat.com)
@@ -341,16 +478,16 @@ Atomic OpenShift Utilities includes
- Include Deprecation - Init Playbook Paths (rteague@redhat.com)
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.8.0
--
+-
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.7.0
--
+-
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.6.0
--
+-
* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.5.0
--
+-
* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.4.0
- bug 1498398. Enclose content between store tag (rromerom@redhat.com)
@@ -643,10 +780,10 @@ Atomic OpenShift Utilities includes
- Allow cluster IP for docker-registry service to be set (hansmi@vshn.ch)
* Thu Nov 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.5-1
--
+-
* Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.4-1
--
+-
* Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.3-1
- Adding configuration for keeping transient namespace on error.
@@ -816,10 +953,10 @@ Atomic OpenShift Utilities includes
- GlusterFS: Remove image option from heketi command (jarrpa@redhat.com)
* Mon Oct 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.187.0
--
+-
* Sun Oct 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.186.0
--
+-
* Sat Oct 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.185.0
- bug 1506073. Lower cpu request for logging when it exceeds limit
@@ -849,7 +986,7 @@ Atomic OpenShift Utilities includes
- Refactor health check playbooks (rteague@redhat.com)
* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.183.0
--
+-
* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.182.0
- Fixing documentation for the cert_key_path variable name.
@@ -923,16 +1060,16 @@ Atomic OpenShift Utilities includes
(hansmi@vshn.ch)
* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.175.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.174.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.173.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.172.0
--
+-
* Sat Oct 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.171.0
- Use "requests" for CPU resources instead of limits
@@ -956,16 +1093,16 @@ Atomic OpenShift Utilities includes
(dymurray@redhat.com)
* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.168.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.167.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.166.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.165.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.164.0
- Change to service-signer.crt for template_service_broker CA_BUNDLE
@@ -988,7 +1125,7 @@ Atomic OpenShift Utilities includes
- Remove unneeded master config updates during upgrades (mgugino@redhat.com)
* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.161.0
--
+-
* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.160.0
- Fix pvc selector default to be empty dict instead of string
@@ -1030,16 +1167,16 @@ Atomic OpenShift Utilities includes
(jchaloup@redhat.com)
* Sun Oct 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.155.0
--
+-
* Sat Oct 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.154.0
--
+-
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.153.0
- default groups.oo_new_etcd_to_config to an empty list (jchaloup@redhat.com)
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.152.0
--
+-
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.151.0
- updated dynamic provision section for openshift metrics to support storage
@@ -1448,7 +1585,7 @@ Atomic OpenShift Utilities includes
- oc_atomic_container: support Skopeo output (gscrivan@redhat.com)
* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.125.0
--
+-
* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.124.0
- Fix ansible_syntax check (rteague@redhat.com)
@@ -1475,7 +1612,7 @@ Atomic OpenShift Utilities includes
(miciah.masters@gmail.com)
* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.123.0
--
+-
* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.122.0
- Update openshift_hosted_routers example to be in ini format.
@@ -1537,10 +1674,10 @@ Atomic OpenShift Utilities includes
- Add missing hostnames to registry cert (sdodson@redhat.com)
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.115.0
--
+-
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.114.0
--
+-
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.113.0
- openshift_version: enterprise accepts new style pre-release
@@ -1558,13 +1695,13 @@ Atomic OpenShift Utilities includes
- Setup tuned profiles in /etc/tuned (jmencak@redhat.com)
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.109.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.108.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.107.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.106.0
- Add dotnet 2.0 to v3.6 (sdodson@redhat.com)
@@ -1601,13 +1738,13 @@ Atomic OpenShift Utilities includes
(sdodson@redhat.com)
* Sat Aug 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.103.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.102.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.101.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.100.0
- Change memory requests and limits units (mak@redhat.com)
@@ -1906,13 +2043,13 @@ Atomic OpenShift Utilities includes
(kwoodson@redhat.com)
* Mon Jul 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.152-1
--
+-
* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.151-1
--
+-
* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.150-1
--
+-
* Sat Jul 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.149-1
- Config was missed before replace. (jkaur@redhat.com)
@@ -1935,7 +2072,7 @@ Atomic OpenShift Utilities includes
- GlusterFS: Fix SSH-based heketi configuration (jarrpa@redhat.com)
* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.143-1
--
+-
* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.142-1
- add scheduled pods check (jvallejo@redhat.com)
@@ -1960,7 +2097,7 @@ Atomic OpenShift Utilities includes
- updating fetch tasks to be flat paths (ewolinet@redhat.com)
* Mon Jul 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.140-1
--
+-
* Sat Jul 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.139-1
- increase implicit 300s default timeout to explicit 600s (jchaloup@redhat.com)
@@ -2008,7 +2145,7 @@ Atomic OpenShift Utilities includes
- Fully qualify ocp ansible_service_broker_image_prefix (sdodson@redhat.com)
* Wed Jul 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.134-1
--
+-
* Tue Jul 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.133-1
- etcd, syscontainer: fix copy of existing datastore (gscrivan@redhat.com)
@@ -2020,7 +2157,7 @@ Atomic OpenShift Utilities includes
- Fixes to storage migration (sdodson@redhat.com)
* Mon Jul 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.132-1
--
+-
* Sun Jul 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.131-1
- Fix upgrade (sdodson@redhat.com)
@@ -2161,7 +2298,7 @@ Atomic OpenShift Utilities includes
- bug 1457642. Use same SG index to avoid seeding timeout (jcantril@redhat.com)
* Wed Jun 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.122-1
--
+-
* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.121-1
- Updating default from null to "" (ewolinet@redhat.com)
@@ -2205,7 +2342,7 @@ Atomic OpenShift Utilities includes
- CloudForms 4.5 templates (simaishi@redhat.com)
* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.114-1
--
+-
* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.113-1
- Make rollout status check best-effort, add poll (skuznets@redhat.com)
@@ -2267,7 +2404,7 @@ Atomic OpenShift Utilities includes
- singletonize some role tasks that repeat a lot (lmeyer@redhat.com)
* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.109-1
--
+-
* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.108-1
- Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de)
@@ -2323,7 +2460,7 @@ Atomic OpenShift Utilities includes
- Install default storageclass in AWS & GCE envs (hekumar@redhat.com)
* Fri Jun 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.98-1
--
+-
* Fri Jun 09 2017 Scott Dodson <sdodson@redhat.com> 3.6.97-1
- Updated to using oo_random_word for secret gen (ewolinet@redhat.com)
@@ -2355,7 +2492,7 @@ Atomic OpenShift Utilities includes
loopback kubeconfigs. (abutcher@redhat.com)
* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.2-1
--
+-
* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.1-1
- Updating image for registry_console (ewolinet@redhat.com)
@@ -2602,13 +2739,13 @@ Atomic OpenShift Utilities includes
- Fix additional master cert & client config creation. (abutcher@redhat.com)
* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.62-1
--
+-
* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.61-1
--
+-
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.60-1
--
+-
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.59-1
- Updating logging and metrics to restart api, ha and controllers when updating
@@ -2621,10 +2758,10 @@ Atomic OpenShift Utilities includes
- Moving Dockerfile content to images dir (jupierce@redhat.com)
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.57-1
--
+-
* Sun May 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.56-1
--
+-
* Sat May 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.55-1
- Fix 1448368, and some other minors issues (ghuang@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ed7a7bd1a..9f044c089 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -123,7 +123,7 @@
- origin-clients
- origin-node
- origin-sdn-ovs
- - tuned-profiles-openshift-node
+ - tuned-profiles-atomic-openshift-node
- tuned-profiles-origin-node
register: result
until: result | success
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 417fb539a..d203b9cda 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -75,7 +75,7 @@ If customization is required for the instances, scale groups, or any other confi
In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as deploying a cluster
using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+There are more examples of cluster inventory settings [`here`](../../inventory/).
#### Step 0 (optional)
@@ -134,11 +134,11 @@ At this point we have successfully created the infrastructure including the mast
Now it is time to install Openshift using the openshift-ansible installer. This can be achieved by running the following playbook:
```
-$ ansible-playbook -i inventory.yml install.yml @provisioning_vars.yml
+$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
```
This playbook accomplishes the following:
1. Builds a dynamic inventory file by querying AWS.
-2. Runs the [`byo`](../../common/openshift-cluster/config.yml)
+2. Runs the [`deploy_cluster.yml`](../deploy_cluster.yml)
Once this playbook completes, the cluster masters should be installed and configured.
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index cab2f1e40..e7bed4f6e 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -18,7 +18,7 @@
name: lib_openshift
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
@@ -30,7 +30,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
index 238a7eb2f..cf76c9d10 100644
--- a/playbooks/aws/provisioning-inventory.example.ini
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -11,6 +11,7 @@ etcd
openshift_deployment_type=origin
openshift_master_bootstrap_enabled=True
+openshift_master_api_port=443
openshift_hosted_router_wait=False
openshift_hosted_registry_wait=False
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
deleted file mode 100644
index 4b74e5bce..000000000
--- a/playbooks/byo/config.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated.
-- import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 5c6def484..fcb828808 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -32,6 +32,7 @@
any_errors_fatal: true
roles:
+ - openshift_facts
- lib_openshift
tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 37fc8a0f6..7b82fe05b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,20 +2,6 @@
###############################################################################
# Upgrade Masters
###############################################################################
-
-# If facts cache were for some reason deleted, this fact may not be set, and if not set
-# it will always default to true. This causes problems for the etcd data dir fact detection
-# so we must first make sure this is set correctly before attempting the backup.
-- name: Set master embedded_etcd fact
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-
- name: Backup and upgrade etcd
import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 47410dff3..4fc897a57 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -11,25 +11,19 @@
msg: "Ensure that new scale groups were provisioned before proceeding to update."
when:
- "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+ - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+ - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
- name: initialize upgrade bits
import_playbook: init.yml
-- name: Drain and upgrade nodes
+- name: unschedule nodes
hosts: oo_sg_current_nodes
- # This var must be set with -e on invocation, as it is not a per-host inventory var
- # and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
- pre_tasks:
+ tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: ../roles/lib_openshift
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -40,14 +34,27 @@
register: node_unschedulable
until: node_unschedulable|succeeded
+- name: Drain nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+ tasks:
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
- retries: 60
- delay: 60
+ retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result | failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
# Alright, let's clean up!
- name: clean up the old scale group
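
The reworked scale-group upgrade above splits unscheduling and draining into separate plays and adds an openshift_upgrade_nodes_drain_timeout knob next to the existing openshift_upgrade_nodes_serial and openshift_upgrade_nodes_max_fail_percentage controls. Per the in-file comment, the serial value must be supplied with -e; a sketch of an invocation, assuming this playbook is used directly as the entry point (path and values are illustrative):

    # Illustrative only: entry-point path and timeout/serial values are assumptions.
    ansible-playbook -i inventory \
      playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml \
      -e openshift_upgrade_nodes_serial="20%" \
      -e openshift_upgrade_nodes_drain_timeout=600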
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 0c1a99272..9ec788e76 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -121,6 +121,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift_service_type }}-master-controllers
systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 9dcad352c..ad67b6c44 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -125,6 +125,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift_service_type }}-master-controllers
systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index ead2efbd0..60ec79df5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -121,6 +121,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift_service_type }}-master-controllers
systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index ae37b1359..c1a3f64f2 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -125,6 +125,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift_service_type }}-master-controllers
systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index eb688f189..1e704b66c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -127,6 +127,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
systemd:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 983bb4a63..a9689da1f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -131,6 +131,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
systemd:
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index 8787c87e1..8087f6ffc 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -46,14 +46,9 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- Running etcd as an embedded service is no longer supported. If this is a
- new install please define an 'etcd' group with either one or three
- hosts. These hosts may be the same hosts as your masters. If this is an
- upgrade you may set openshift_master_unsupported_embedded_etcd=true
- until a migration playbook becomes available.
+ Running etcd as an embedded service is no longer supported.
when:
- g_etcd_hosts | default([]) | length not in [3,1]
- - not openshift_master_unsupported_embedded_etcd | default(False)
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
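
With the embedded-etcd escape hatch (openshift_master_unsupported_embedded_etcd) removed, the check above now simply requires an etcd inventory group containing one or three hosts, which may be the master hosts themselves. A minimal sketch, with hostnames assumed for illustration:

    [etcd]
    master1.example.com
    master2.example.com
    master3.example.com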
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 9ddb4afe2..313ed8bec 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -19,7 +19,6 @@
name: etcd
tasks_from: migrate.pre_check.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ ansible_default_ipv4.address }}"
# TODO: This will be different for release-3.6 branch
@@ -49,7 +48,6 @@
tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
@@ -84,7 +82,6 @@
name: etcd
tasks_from: migrate.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
@@ -97,7 +94,6 @@
name: etcd
tasks_from: clean_data.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
diff --git a/playbooks/openshift-etcd/private/upgrade_backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index 22ed7e610..7dfea07f1 100644
--- a/playbooks/openshift-etcd/private/upgrade_backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -9,7 +9,6 @@
tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md
index f62aea229..107bbfff6 100644
--- a/playbooks/openshift-glusterfs/README.md
+++ b/playbooks/openshift-glusterfs/README.md
@@ -26,6 +26,9 @@ file. The hosts in this group are the nodes of the GlusterFS cluster.
devices but you must specify the following variables in `[OSEv3:vars]`:
* `openshift_storage_glusterfs_is_missing=False`
* `openshift_storage_glusterfs_heketi_is_missing=False`
+ * If GlusterFS will be running natively, the target hosts must also be listed
+ in the `nodes` group. They must also already be configured as OpenShift
+ nodes before this playbook runs.
By default, pods for a native GlusterFS cluster will be created in the
`default` namespace. To change this, specify
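
The README note added above states that natively hosted GlusterFS targets must also appear in the nodes group and already be configured as OpenShift nodes. A partial inventory sketch of that shape (hostnames and device paths are illustrative assumptions):

    [OSEv3:children]
    masters
    nodes
    glusterfs

    [nodes]
    node11.example.com openshift_schedulable=True
    node12.example.com openshift_schedulable=True
    node13.example.com openshift_schedulable=True

    [glusterfs]
    node11.example.com glusterfs_devices='[ "/dev/vdb" ]'
    node12.example.com glusterfs_devices='[ "/dev/vdb" ]'
    node13.example.com glusterfs_devices='[ "/dev/vdb" ]'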
diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml
index d71b4f1c5..83d330284 100644
--- a/playbooks/openshift-logging/config.yml
+++ b/playbooks/openshift-logging/config.yml
@@ -1,7 +1,7 @@
---
#
# This playbook is a preview of upcoming changes for installing
-# Hosted logging on. See inventory/byo/hosts.*.example for the
+# Hosted logging on. See inventory/hosts.example for the
# currently supported method.
#
- import_playbook: ../init/main.yml
diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 2a190935e..9f5502141 100644
--- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -56,7 +56,7 @@
- groups.oo_etcd_to_config | default([]) | length == 0
- (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
# Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
- # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml
+ # This change will be reverted in playbooks/redeploy-certificates.yml
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: servingInfo.clientCA
diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml
index 261e2048f..b86cb3cc2 100644
--- a/playbooks/openshift-node/private/additional_config.yml
+++ b/playbooks/openshift-node/private/additional_config.yml
@@ -33,7 +33,6 @@
roles:
- role: flannel
etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
when: openshift_use_flannel | default(false) | bool
- name: Additional node config
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index f567242cd..d361d6278 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -226,7 +226,7 @@ advanced configuration:
[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware
[origin]: https://www.openshift.org/
[centos7]: https://www.centos.org/
-[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example
+[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
[advanced-configuration]: ./advanced-configuration.md
[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster
[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index db2a13d38..2c9b70b5f 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -159,11 +159,22 @@ So the provisioned cluster nodes will start using those natively as
default nameservers. Technically, this allows to deploy OpenShift clusters
without dnsmasq proxies.
-The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain` will form the cluster's DNS domain all
-your servers will be under. With the default values, this will be
-`openshift.example.com`. For workloads, the default subdomain is 'apps'.
-That sudomain can be set as well by the `openshift_openstack_app_subdomain` variable in
-the inventory.
+The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain`
+will form the cluster's public DNS domain all your servers will be under. With
+the default values, this will be `openshift.example.com`. For workloads, the
+default subdomain is 'apps'. That sudomain can be set as well by the
+`openshift_openstack_app_subdomain` variable in the inventory.
+
+If you want to use a two sets of hostnames for public and private/prefixed DNS
+records for your externally managed public DNS server, you can specify
+`openshift_openstack_public_hostname_suffix` and/or
+`openshift_openstack_private_hostname_suffix`. The suffixes will be added
+to the nsupdate records sent to the external DNS server. Those are empty by default.
+
+**Note** the real hostnames, Nova servers' or ansible hostnames and inventory
+variables will not be updated. The deployment may be done on arbitrary named
+hosts with the hostnames managed by cloud-init. Inventory hostnames will ignore
+the suffixes.
The `openstack_<role name>_hostname` is a set of variables used for customising
public names of Nova servers provisioned with a given role. When such a variable stays commented,
@@ -343,7 +354,7 @@ installation for example by specifying the authentication.
The full list of options is available in this sample inventory:
-https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example
+https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
Note, that in order to deploy OpenShift origin, you should update the following
variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`:
@@ -604,7 +615,7 @@ A library of custom post-provision actions exists in `openshift-ansible-contrib/
Once it succeeds, you can install openshift by running:
- ansible-playbook openshift-ansible/playbooks/byo/config.yml
+ ansible-playbook openshift-ansible/playbooks/deploy_cluster.yml
## Access UI
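To make the hostname-suffix behaviour documented above concrete, here is a minimal Python sketch of how a public record name could be composed. It is only an illustration: the `public_record` helper and the placement of the suffix (appended to the short host name, before the cluster domain) are assumptions, not the role's actual nsupdate template logic.

```python
# Illustrative only: composes a public DNS record name the way the suffix
# documentation above describes it.  The helper name and the exact placement
# of the suffix are assumptions; the real names come from the role's
# nsupdate templates.
def public_record(short_name, clusterid="openshift", public_dns_domain="example.com",
                  public_hostname_suffix=""):
    return "{}{}.{}.{}".format(short_name, public_hostname_suffix,
                               clusterid, public_dns_domain)

print(public_record("master-0"))                                    # master-0.openshift.example.com
print(public_record("master-0", public_hostname_suffix="-public"))  # master-0-public.openshift.example.com
# The Nova server name and the Ansible inventory hostname stay "master-0" in both cases.
```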
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 0e3863304..bbc6edd48 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -14,7 +14,6 @@
vars:
etcd_cert_prefix: calico.etcd-
etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 879ca4f4e..f2e1fc310 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -19,3 +19,4 @@ dependencies:
- role: lib_openshift
- role: lib_os_firewall
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 488b6b0bc..2e4a0dc39 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -2,8 +2,8 @@
flannel_interface: "{{ ansible_default_ipv4.interface }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
-etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt"
-etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt"
-etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key"
+etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/flannel.etcd-ca.crt"
+etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.crt"
+etcd_peer_key_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.key"
openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 1d0f5df6a..cd11fd9ff 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -4,6 +4,6 @@ flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
-etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
+etcd_peer_ca_file: "{{ etcd_conf_dir + '/master.etcd-ca.crt' }}"
etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
diff --git a/roles/lib_utils/library/oo_ec2_group.py b/roles/lib_utils/library/oo_ec2_group.py
new file mode 100644
index 000000000..615021ac5
--- /dev/null
+++ b/roles/lib_utils/library/oo_ec2_group.py
@@ -0,0 +1,903 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: skip-file
+# flake8: noqa
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+author: "Andrew de Quincey (@adq)"
+version_added: "1.3"
+requirements: [ boto3 ]
+short_description: maintain an ec2 VPC security group.
+description:
+  - maintains ec2 security groups. This module has a dependency on boto3.
+options:
+ name:
+ description:
+ - Name of the security group.
+ - One of and only one of I(name) or I(group_id) is required.
+ - Required if I(state=present).
+ required: false
+ group_id:
+ description:
+ - Id of group to delete (works only with absent).
+ - One of and only one of I(name) or I(group_id) is required.
+ required: false
+ version_added: "2.4"
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ rules:
+ description:
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+ no inbound rules will be enabled. Rules list may include its own name in `group_name`.
+ This allows idempotent loopback additions (e.g. allow group to access itself).
+      Rule sources list support was added in version 2.4. This allows defining multiple sources per
+      source type as well as multiple source types per rule. Prior to 2.4 only an individual source was allowed.
+ required: false
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ Rule Egress sources list support was added in version 2.4.
+ required: false
+ version_added: "1.6"
+ state:
+ version_added: "1.4"
+ description:
+ - Create or delete a security group
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+ aliases: []
+ purge_rules:
+ version_added: "1.8"
+ description:
+ - Purge existing rules on security group that are not found in rules
+ required: false
+ default: 'true'
+ aliases: []
+ purge_rules_egress:
+ version_added: "1.8"
+ description:
+ - Purge existing rules_egress on security group that are not found in rules_egress
+ required: false
+ default: 'true'
+ aliases: []
+ tags:
+ version_added: "2.4"
+ description:
+ - A dictionary of one or more tags to assign to the security group.
+ required: false
+ purge_tags:
+ version_added: "2.4"
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
+ tags will not be modified.
+ required: false
+ default: yes
+ choices: [ 'yes', 'no' ]
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
+'''
+
+EXAMPLES = '''
+- name: example ec2 group
+ ec2_group:
+ name: example
+ description: an example EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ aws_secret_key: SECRET
+ aws_access_key: ACCESS
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ group_id: amazon-elb/sg-87654321/amazon-elb-sg
+ - proto: tcp
+ from_port: 3306
+ to_port: 3306
+ group_id: 123412341234/sg-87654321/exact-name-of-sg
+ - proto: udp
+ from_port: 10050
+ to_port: 10050
+ cidr_ip: 10.0.0.0/8
+ - proto: udp
+ from_port: 10051
+ to_port: 10051
+ group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+ to_port: -1 # icmp subtype, -1 = any subtype
+ cidr_ip: 10.0.0.0/8
+ - proto: all
+ # the containing group name may be specified here
+ group_name: example
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ cidr_ipv6: 64:ff9b::/96
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
+
+- name: example2 ec2 group
+ ec2_group:
+ name: example2
+ description: an example2 EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ rules:
+ # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
+ - proto: tcp
+ ports: 22
+ group_name: example-vpn
+ - proto: tcp
+ ports:
+ - 80
+ - 443
+ - 8080-8099
+ cidr_ip: 0.0.0.0/0
+ # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
+ - proto: tcp
+ ports:
+ - 6379
+ - 26379
+ group_name:
+ - example-vpn
+ - example-redis
+ - proto: tcp
+ ports: 5665
+ group_name: example-vpn
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ cidr_ipv6:
+ - 2607:F8B0::/32
+ - 64:ff9b::/96
+ group_id:
+ - sg-edcd9784
+
+- name: "Delete group by its id"
+ ec2_group:
+ group_id: sg-33b4ee5b
+ state: absent
+'''
+
+RETURN = '''
+group_name:
+ description: Security group name
+ sample: My Security Group
+ type: string
+ returned: on create/update
+group_id:
+ description: Security group id
+ sample: sg-abcd1234
+ type: string
+ returned: on create/update
+description:
+ description: Description of security group
+ sample: My Security Group
+ type: string
+ returned: on create/update
+tags:
+ description: Tags associated with the security group
+ sample:
+ Name: My Security Group
+ Purpose: protecting stuff
+ type: dict
+ returned: on create/update
+vpc_id:
+ description: ID of VPC to which the security group belongs
+ sample: vpc-abcd1234
+ type: string
+ returned: on create/update
+ip_permissions:
+ description: Inbound rules associated with the security group.
+ sample:
+ - from_port: 8182
+ ip_protocol: tcp
+ ip_ranges:
+ - cidr_ip: "1.1.1.1/32"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ to_port: 8182
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+ip_permissions_egress:
+ description: Outbound rules associated with the security group.
+ sample:
+ - ip_protocol: -1
+ ip_ranges:
+ - cidr_ip: "0.0.0.0/0"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
+'''
+
+import json
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn
+from ansible.module_utils.ec2 import get_aws_connection_info
+from ansible.module_utils.ec2 import ec2_argument_spec
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.ec2 import HAS_BOTO3
+from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
+from ansible.module_utils.ec2 import AWSRetry
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported HAS_BOTO3
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_security_groups_with_backoff(connection, **kwargs):
+ return connection.describe_security_groups(**kwargs)
+
+
+def deduplicate_rules_args(rules):
+ """Returns unique rules"""
+ if rules is None:
+ return None
+ return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
+
+
+def make_rule_key(prefix, rule, group_id, cidr_ip):
+ if 'proto' in rule:
+ proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
+ elif 'IpProtocol' in rule:
+ proto, from_port, to_port = [rule.get(x, None) for x in ('IpProtocol', 'FromPort', 'ToPort')]
+ if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
+ from_port = 'none'
+ to_port = 'none'
+ key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
+ return key.lower().replace('-none', '-None')
+
+
+def add_rules_to_lookup(ipPermissions, group_id, prefix, dict):
+ for rule in ipPermissions:
+ for groupGrant in rule.get('UserIdGroupPairs', []):
+ dict[make_rule_key(prefix, rule, group_id, groupGrant.get('GroupId'))] = (rule, groupGrant)
+ for ipv4Grants in rule.get('IpRanges', []):
+ dict[make_rule_key(prefix, rule, group_id, ipv4Grants.get('CidrIp'))] = (rule, ipv4Grants)
+ for ipv6Grants in rule.get('Ipv6Ranges', []):
+ dict[make_rule_key(prefix, rule, group_id, ipv6Grants.get('CidrIpv6'))] = (rule, ipv6Grants)
+
+
+def validate_rule(module, rule):
+ VALID_PARAMS = ('cidr_ip', 'cidr_ipv6',
+ 'group_id', 'group_name', 'group_desc',
+ 'proto', 'from_port', 'to_port')
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+    Returns a tuple of (group_id, ip, ipv6, target_group_created) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+ AWS accepts an ip range or a security group as target of a rule. This
+    function validates the rule specification and returns either a non-None
+ group_id or a non-None ip range.
+ """
+
+ FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
+ group_id = None
+ group_name = None
+ ip = None
+ ipv6 = None
+ target_group_created = False
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ip, not both")
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ip, not both")
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg="Specify group_id OR group_name, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+ # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ elif 'group_id' in rule:
+ group_id = rule['group_id']
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ module.fail_json(msg="group %s will be automatically created by rule %s and "
+ "no description was provided" % (group_name, rule))
+ if not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ auto_group = client.create_security_group(**params)
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ elif 'cidr_ip' in rule:
+ ip = rule['cidr_ip']
+ elif 'cidr_ipv6' in rule:
+ ipv6 = rule['cidr_ipv6']
+
+ return group_id, ip, ipv6, target_group_created
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, str):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((port.strip(),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ if 'ports' not in rule:
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = from_to
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+    # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+ # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
+
+
+def authorize_ip(type, changed, client, group, groupRules,
+ ip, ip_permission, module, rule, ethertype):
+ # If rule already exists, don't later delete it
+ for thisip in ip:
+ rule_id = make_rule_key(type, rule, group['GroupId'], thisip)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_ip_grant(rule, thisip, ethertype)
+ if ip_permission:
+ try:
+ if type == "in":
+ client.authorize_security_group_ingress(GroupId=group['GroupId'],
+ IpPermissions=[ip_permission])
+ elif type == "out":
+ client.authorize_security_group_egress(GroupId=group['GroupId'],
+ IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to authorize %s for ip %s security group '%s' - %s" %
+ (type, thisip, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ return changed, ip_permission
+
+
+def serialize_group_grant(group_id, rule):
+ permission = {'IpProtocol': rule['proto'],
+ 'FromPort': rule['from_port'],
+ 'ToPort': rule['to_port'],
+ 'UserIdGroupPairs': [{'GroupId': group_id}]}
+
+ return fix_port_and_protocol(permission)
+
+
+def serialize_revoke(grant, rule):
+ permission = dict()
+ fromPort = rule['FromPort'] if 'FromPort' in rule else None
+ toPort = rule['ToPort'] if 'ToPort' in rule else None
+ if 'GroupId' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'UserIdGroupPairs': [{'GroupId': grant['GroupId']}]
+ }
+ elif 'CidrIp' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'IpRanges': [grant]
+ }
+ elif 'CidrIpv6' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'Ipv6Ranges': [grant]
+ }
+ return fix_port_and_protocol(permission)
+
+
+def serialize_ip_grant(rule, thisip, ethertype):
+ permission = {'IpProtocol': rule['proto'],
+ 'FromPort': rule['from_port'],
+ 'ToPort': rule['to_port']}
+ if ethertype == "ipv4":
+ permission['IpRanges'] = [{'CidrIp': thisip}]
+ elif ethertype == "ipv6":
+ permission['Ipv6Ranges'] = [{'CidrIpv6': thisip}]
+
+ return fix_port_and_protocol(permission)
+
+
+def fix_port_and_protocol(permission):
+ for key in ['FromPort', 'ToPort']:
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = str(permission['IpProtocol'])
+
+ return permission
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ name=dict(),
+ group_id=dict(),
+ description=dict(),
+ vpc_id=dict(),
+ rules=dict(type='list'),
+ rules_egress=dict(type='list'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ purge_rules=dict(default=True, required=False, type='bool'),
+ purge_rules_egress=dict(default=True, required=False, type='bool'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, required=False, type='bool')
+ )
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'group_id']],
+ required_if=[['state', 'present', ['name']]],
+ )
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ name = module.params['name']
+ group_id = module.params['group_id']
+ description = module.params['description']
+ vpc_id = module.params['vpc_id']
+ rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
+ rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
+ state = module.params.get('state')
+ purge_rules = module.params['purge_rules']
+ purge_rules_egress = module.params['purge_rules_egress']
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+
+ if state == 'present' and not description:
+ module.fail_json(msg='Must provide description when state is present.')
+
+ changed = False
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="The AWS region must be specified as an "
+ "environment variable or in the AWS credentials "
+ "profile.")
+ client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params)
+ group = None
+ groups = dict()
+ security_groups = []
+    # fetch all security groups and determine whether the requested group is present
+ try:
+ response = get_security_groups_with_backoff(client)
+ security_groups = response.get('SecurityGroups', [])
+ except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials", exception=traceback.format_exc())
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Error in describe_security_groups: %s" % e, exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ for sg in security_groups:
+ groups[sg['GroupId']] = sg
+ groupName = sg['GroupName']
+ if groupName in groups:
+ # Prioritise groups from the current VPC
+ # even if current VPC is EC2-Classic
+ if groups[groupName].get('VpcId') == vpc_id:
+ # Group saved already matches current VPC, change nothing
+ pass
+ elif vpc_id is None and groups[groupName].get('VpcId') is None:
+ # We're in EC2 classic, and the group already saved is as well
+ # No VPC groups can be used alongside EC2 classic groups
+ pass
+ else:
+ # the current SG stored has no direct match, so we can replace it
+ groups[groupName] = sg
+ else:
+ groups[groupName] = sg
+
+ if group_id and sg['GroupId'] == group_id:
+ group = sg
+ elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id):
+ group = sg
+
+ # Ensure requested group is absent
+ if state == 'absent':
+ if group:
+ # found a match, delete it
+ try:
+ if not module.check_mode:
+ client.delete_security_group(GroupId=group['GroupId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ else:
+ group = None
+ changed = True
+ else:
+ # no match found, no changes required
+ pass
+
+ # Ensure requested group is present
+ elif state == 'present':
+ if group:
+ # existing group
+ if group['Description'] != description:
+ module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
+ "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
+
+ # if the group doesn't exist, create it now
+ else:
+ # no match found, create it
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ group = client.create_security_group(**params)
+ # When a group is created, an egress_rule ALLOW ALL
+ # to 0.0.0.0/0 is added automatically but it's not
+ # reflected in the object returned by the AWS API
+                # call. We re-read the group to get an updated object.
+                # Amazon sometimes takes a couple of seconds to update the security group, so wait until it exists.
+ while True:
+ group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ if group.get('VpcId') and not group.get('IpPermissionsEgress'):
+ pass
+ else:
+ break
+
+ changed = True
+
+ if tags is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+ if tags_to_delete:
+ try:
+ client.delete_tags(Resources=[group['GroupId']], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ else:
+ module.fail_json(msg="Unsupported state requested: %s" % state)
+
+ # create a lookup for all existing rules on the group
+ ip_permission = []
+ if group:
+ # Manage ingress rules
+ groupRules = {}
+ add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
+ # Now, go through all provided rules and ensure they are there.
+ if rules is not None:
+ for rule in rules:
+ validate_rule(module, rule)
+ group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
+ group, groups, vpc_id)
+ if target_group_created:
+ changed = True
+
+ if rule['proto'] in ('all', '-1', -1):
+ rule['proto'] = -1
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ if group_id:
+ rule_id = make_rule_key('in', rule, group['GroupId'], group_id)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_group_grant(group_id, rule)
+ if ip_permission:
+ ips = ip_permission
+ if vpc_id:
+ [useridpair.update({'VpcId': vpc_id}) for useridpair in
+ ip_permission.get('UserIdGroupPairs', [])]
+ try:
+ client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to authorize ingress for group %s security group '%s' - %s" %
+ (group_id, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ elif ip:
+ # Convert ip to list we can iterate over
+ if ip and not isinstance(ip, list):
+ ip = [ip]
+
+ changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip, ip_permission,
+ module, rule, "ipv4")
+ elif ipv6:
+ # Convert ip to list we can iterate over
+ if not isinstance(ipv6, list):
+ ipv6 = [ipv6]
+ # If rule already exists, don't later delete it
+ changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6, ip_permission,
+ module, rule, "ipv6")
+ # Finally, remove anything left in the groupRules -- these will be defunct rules
+ if purge_rules:
+ for (rule, grant) in groupRules.values():
+ ip_permission = serialize_revoke(grant, rule)
+ if not module.check_mode:
+ try:
+ client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to revoke ingress for security group '%s' - %s" %
+ (group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ # Manage egress rules
+ groupRules = {}
+ add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules)
+ # Now, go through all provided rules and ensure they are there.
+ if rules_egress is not None:
+ for rule in rules_egress:
+ validate_rule(module, rule)
+ group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
+ group, groups, vpc_id)
+ if target_group_created:
+ changed = True
+
+ if rule['proto'] in ('all', '-1', -1):
+ rule['proto'] = -1
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ if group_id:
+ rule_id = make_rule_key('out', rule, group['GroupId'], group_id)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_group_grant(group_id, rule)
+ if ip_permission:
+ ips = ip_permission
+ if vpc_id:
+ [useridpair.update({'VpcId': vpc_id}) for useridpair in
+ ip_permission.get('UserIdGroupPairs', [])]
+ try:
+ client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to authorize egress for group %s security group '%s' - %s" %
+ (group_id, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ elif ip:
+ # Convert ip to list we can iterate over
+ if not isinstance(ip, list):
+ ip = [ip]
+ changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip,
+ ip_permission, module, rule, "ipv4")
+ elif ipv6:
+ # Convert ip to list we can iterate over
+ if not isinstance(ipv6, list):
+ ipv6 = [ipv6]
+ # If rule already exists, don't later delete it
+ changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6,
+ ip_permission, module, rule, "ipv6")
+ elif vpc_id is not None:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0'
+ if default_egress_rule not in groupRules:
+ if not module.check_mode:
+ ip_permission = [{'IpProtocol': '-1',
+ 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
+ }
+ ]
+ try:
+ client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to authorize egress for ip %s security group '%s' - %s" %
+ ('0.0.0.0/0',
+ group['GroupName'],
+ e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ else:
+ # make sure the default egress rule is not removed
+ del groupRules[default_egress_rule]
+
+ # Finally, remove anything left in the groupRules -- these will be defunct rules
+ if purge_rules_egress and vpc_id is not None:
+ for (rule, grant) in groupRules.values():
+ # we shouldn't be revoking 0.0.0.0 egress
+ if grant != '0.0.0.0/0':
+ ip_permission = serialize_revoke(grant, rule)
+ if not module.check_mode:
+ try:
+ client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to revoke egress for ip %s security group '%s' - %s" %
+ (grant, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ if group:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group)
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []),
+ tag_name_key_name='key', tag_value_key_name='value')
+ module.exit_json(changed=changed, **security_group)
+ else:
+ module.exit_json(changed=changed, group_id=None)
+
+
+if __name__ == '__main__':
+ main()
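The `ports` shorthand documented in the module above is expanded into explicit `from_port`/`to_port` pairs before rules are compared against AWS. A standalone sketch of that expansion (re-implemented here for illustration rather than imported from the module, and slightly simplified):

```python
# Mirrors the behaviour of ports_expand()/rule_expand_ports() above:
# every entry in 'ports' becomes its own rule with from_port/to_port set.
def expand_ports(rule):
    if 'ports' not in rule:
        return [rule]
    ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
    expanded = []
    for port in ports:
        new_rule = {k: v for k, v in rule.items() if k != 'ports'}
        if isinstance(port, str) and '-' in port:
            new_rule['from_port'], new_rule['to_port'] = (p.strip() for p in port.split('-', 1))
        else:
            new_rule['from_port'] = new_rule['to_port'] = port
        expanded.append(new_rule)
    return expanded

print(expand_ports({'proto': 'tcp', 'ports': [80, 443, '8080-8099'], 'cidr_ip': '0.0.0.0/0'}))
# -> three rules: 80-80, 443-443 and 8080-8099, each keeping proto and cidr_ip
```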
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 42ef22846..74e5d1dde 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -6,9 +6,7 @@ openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
-openshift_aws_current_version: ''
-openshift_aws_new_version: ''
-
+openshift_aws_node_group_upgrade: False
openshift_aws_wait_for_ssh: True
openshift_aws_clusterid: default
@@ -19,7 +17,6 @@ openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''
-openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift"
openshift_aws_iam_role_name: openshift_node_describe_instances
openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
@@ -34,14 +31,12 @@ openshift_aws_ami_name: openshift-gi
openshift_aws_base_ami_name: ami_base
openshift_aws_launch_config_bootstrap_token: ''
-openshift_aws_launch_config_basename: "{{ openshift_aws_clusterid }}"
openshift_aws_users: []
openshift_aws_ami_tags:
bootstrap: "true"
openshift-created: "true"
- clusterid: "{{ openshift_aws_clusterid }}"
parent: "{{ openshift_aws_base_ami | default('unknown') }}"
openshift_aws_s3_mode: create
@@ -124,6 +119,20 @@ openshift_aws_ami_map:
infra: "{{ openshift_aws_ami }}"
compute: "{{ openshift_aws_ami }}"
+openshift_aws_master_group:
+- name: "{{ openshift_aws_clusterid }} master group"
+ group: master
+
+openshift_aws_node_groups:
+- name: "{{ openshift_aws_clusterid }} compute group"
+ group: compute
+- name: "{{ openshift_aws_clusterid }} infra group"
+ group: infra
+
+openshift_aws_created_asgs: []
+openshift_aws_current_asgs: []
+
+# these will be used during upgrade
openshift_aws_master_group_config:
# The 'master' key is always required here.
master:
@@ -139,7 +148,6 @@ openshift_aws_master_group_config:
host-type: master
sub-host-type: default
runtime: docker
- version: "{{ openshift_aws_new_version }}"
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
@@ -163,7 +171,6 @@ openshift_aws_node_group_config:
host-type: node
sub-host-type: compute
runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -183,7 +190,6 @@ openshift_aws_node_group_config:
host-type: node
sub-host-type: infra
runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -283,21 +289,4 @@ openshift_aws_node_run_bootstrap_startup: True
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
-openshift_aws_node_groups: nodes
-
openshift_aws_masters_groups: masters,etcd,nodes
-
-# If creating extra node groups, you'll need to define all of the following
-
-# The format is the same as openshift_aws_node_group_config, but the top-level
-# key names should be different (ie, not == master or infra).
-# openshift_aws_node_group_config_extra: {}
-
-# This variable should look like openshift_aws_launch_config_security_groups
-# and contain a one-to-one mapping of top level keys that are defined in
-# openshift_aws_node_group_config_extra.
-# openshift_aws_launch_config_security_groups_extra: {}
-
-# openshift_aws_node_security_groups_extra: {}
-
-# openshift_aws_ami_map_extra: {}
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index e707abd3f..dfcb11da3 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
@@ -4,11 +4,43 @@
Custom filters for use in openshift_aws
'''
+from ansible import errors
+
class FilterModule(object):
''' Custom ansible filters for use by openshift_aws role'''
@staticmethod
+ def scale_groups_serial(scale_group_info, upgrade=False):
+ ''' This function will determine what the deployment serial should be and return it
+
+ Search through the tags and find the deployment_serial tag. Once found,
+ determine if an increment is needed during an upgrade.
+ if upgrade is true then increment the serial and return it
+ else return the serial
+ '''
+ if scale_group_info == []:
+ return 1
+
+ scale_group_info = scale_group_info[0]
+
+ if not isinstance(scale_group_info, dict):
+ raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")
+
+ serial = None
+
+ for tag in scale_group_info['tags']:
+ if tag['key'] == 'deployment_serial':
+ serial = int(tag['value'])
+ if upgrade:
+ serial += 1
+ break
+ else:
+ raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")
+
+ return serial
+
+ @staticmethod
def scale_groups_match_capacity(scale_group_info):
''' This function will verify that the scale group instance count matches
the scale group desired capacity
@@ -38,4 +70,5 @@ class FilterModule(object):
def filters(self):
''' returns a mapping of filters to methods '''
return {'build_instance_tags': self.build_instance_tags,
- 'scale_groups_match_capacity': self.scale_groups_match_capacity}
+ 'scale_groups_match_capacity': self.scale_groups_match_capacity,
+ 'scale_groups_serial': self.scale_groups_serial}
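The new `scale_groups_serial` filter is what keeps upgrade runs from colliding with the scale groups that already exist: it reads the `deployment_serial` tag of the current group and bumps it only when `upgrade` is true. A minimal sketch of its behaviour on the kind of data `ec2_asg_facts` returns (the function body mirrors the filter above; the sample data is hypothetical):

```python
# Mirrors scale_groups_serial() above; the sample input is hypothetical.
def scale_groups_serial(scale_group_info, upgrade=False):
    if scale_group_info == []:
        return 1                                  # no ASG yet: first deployment
    for tag in scale_group_info[0]['tags']:
        if tag['key'] == 'deployment_serial':
            serial = int(tag['value'])
            return serial + 1 if upgrade else serial
    raise ValueError("deployment_serial tag was not found")

existing = [{'tags': [{'key': 'deployment_serial', 'value': '3'}]}]
print(scale_groups_serial([]))               # 1  (nothing to upgrade from)
print(scale_groups_serial(existing))         # 3  (plain provision keeps the serial)
print(scale_groups_serial(existing, True))   # 4  (upgrade run gets a new serial)
```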
diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml
index ae320962f..c2a2cea30 100644
--- a/roles/openshift_aws/tasks/accept_nodes.yml
+++ b/roles/openshift_aws/tasks/accept_nodes.yml
@@ -1,6 +1,6 @@
---
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -11,7 +11,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -22,9 +22,14 @@
delay: 3
until: "'instances' in instancesout and instancesout.instances|length > 0"
-- debug:
+- name: Dump the private dns names
+ debug:
msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
+- name: Dump the master public ip address
+ debug:
+ msg: "{{ mastersout.instances[0].public_ip_address }}"
+
- name: approve nodes
oc_adm_csr:
#approve_all: True
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 2c1e88cfb..7fb617dd5 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -1,6 +1,4 @@
---
-# This task file expects l_nodes_to_build to be passed in.
-
# When openshift_aws_use_custom_ami is '' then
# we retrieve the latest build AMI.
# Then set openshift_aws_ami to the ami.
@@ -26,6 +24,35 @@
# Need to set epoch time in one place to use for launch_config and scale_group
- set_fact:
l_epoch_time: "{{ ansible_date_time.epoch }}"
+#
+# Query the cluster's ASGs and determine whether new ones need to be created.
+# If more than one ASG matches this node group, fail.
+- name: query all ASGs for this cluster
+ ec2_asg_facts:
+ region: "{{ openshift_aws_region }}"
+ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(l_node_group_config[openshift_aws_node_group.group].tags) }}"
+ register: asgs
+
+- fail:
+ msg: "Found more than 1 auto scaling group that matches the query for group: {{ openshift_aws_node_group }}"
+ when:
+ - asgs.results|length > 1
+
+- debug:
+ msg: "{{ asgs }}"
+
+- name: set the value for the deployment_serial and the current asgs
+ set_fact:
+ l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
+ openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
+
+- name: dump deployment serial
+ debug:
+ msg: "Deployment serial: {{ l_deployment_serial }}"
+
+- name: dump current_asgs
+ debug:
+ msg: "openshift_aws_current_asgs: {{ openshift_aws_current_asgs }}"
- when: openshift_aws_create_iam_role
include_tasks: iam_role.yml
diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml
index d9910d938..cf3bb28fb 100644
--- a/roles/openshift_aws/tasks/iam_role.yml
+++ b/roles/openshift_aws/tasks/iam_role.yml
@@ -13,11 +13,10 @@
#####
- name: Create an iam role
iam_role:
- name: "{{ item.value.iam_role }}"
+ name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: l_node_group_config[openshift_aws_node_group.group].iam_role is defined
#####
# The second part of this task file is linking the role to a policy
@@ -28,9 +27,8 @@
- name: create an iam policy
iam_policy:
iam_type: role
- iam_name: "{{ item.value.iam_role }}"
- policy_json: "{{ item.value.policy_json }}"
- policy_name: "{{ item.value.policy_name }}"
+ iam_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
+ policy_json: "{{ l_node_group_config[openshift_aws_node_group.group].policy_json }}"
+ policy_name: "{{ l_node_group_config[openshift_aws_node_group.group].policy_name }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: "'iam_role' in l_node_group_config[openshift_aws_node_group.group]"
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index fed80b7eb..6f78ee00a 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -1,15 +1,26 @@
---
-- fail:
- msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
- when:
- - openshift_aws_ami is undefined
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name: "{{ openshift_aws_launch_config_security_groups[openshift_aws_node_group.group] }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ region: "{{ openshift_aws_region }}"
+ register: ec2sgs
-- fail:
- msg: "Ensure that openshift_deployment_type is defined."
- when:
- - openshift_deployment_type is undefined
-
-- include_tasks: launch_config_create.yml
- with_dict: "{{ l_nodes_to_build }}"
- loop_control:
- loop_var: launch_config_item
+# Create the scale group config
+- name: Create the node scale group launch config
+ ec2_lc:
+ name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ region: "{{ openshift_aws_region }}"
+ image_id: "{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}"
+ instance_type: "{{ l_node_group_config[openshift_aws_node_group.group].instance_type }}"
+ security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
+ instance_profile_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role if l_node_group_config[openshift_aws_node_group.group].iam_role is defined and
+ l_node_group_config[openshift_aws_node_group.group].iam_role != '' and
+ openshift_aws_create_iam_role
+ else omit }}"
+ user_data: "{{ lookup('template', 'user_data.j2') }}"
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ l_node_group_config[openshift_aws_node_group.group].volumes }}"
+ assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml
deleted file mode 100644
index f7f0f0953..000000000
--- a/roles/openshift_aws/tasks/launch_config_create.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: fetch the security groups for launch config
- ec2_group_facts:
- filters:
- group-name: "{{ l_launch_config_security_groups[launch_config_item.key] }}"
- vpc-id: "{{ vpcout.vpcs[0].id }}"
- region: "{{ openshift_aws_region }}"
- register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group launch config
- ec2_lc:
- name: "{{ openshift_aws_launch_config_basename }}-{{ launch_config_item.key }}{{'-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- region: "{{ openshift_aws_region }}"
- image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}"
- instance_type: "{{ launch_config_item.value.instance_type }}"
- security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
- instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and
- launch_config_item.value.iam_role != '' and
- openshift_aws_create_iam_role
- else omit }}"
- user_data: "{{ lookup('template', 'user_data.j2') }}"
- key_name: "{{ openshift_aws_ssh_key_name }}"
- ebs_optimized: False
- volumes: "{{ launch_config_item.value.volumes }}"
- assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 06f649343..786a2e4cf 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -20,13 +20,14 @@
- name: include scale group creation for master
include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_master_group }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_master_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
+ l_node_group_config: "{{ openshift_aws_master_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 8cc75cd0c..696b323c0 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -27,7 +27,7 @@
Name: "{{ openshift_aws_base_ami_name }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index 041ed0791..d82f18574 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -3,7 +3,7 @@
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
- name: fetch master instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
@@ -31,20 +31,15 @@
- name: include build compute and infra node groups
include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_node_groups }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
-
-- name: include build node group for extra nodes
- include_tasks: build_node_group.yml
- when: openshift_aws_node_group_config_extra is defined
- vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups_extra }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map_extra }}"
+ l_node_group_config: "{{ openshift_aws_node_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
# instances aren't scaling fast enough here, we need to wait for them
- when: openshift_aws_wait_for_ssh | bool
name: wait for our new nodes to come up
include_tasks: wait_for_groups.yml
+ vars:
+ created_asgs: "{{ openshift_aws_created_asgs }}"
diff --git a/roles/openshift_aws/tasks/remove_scale_group.yml b/roles/openshift_aws/tasks/remove_scale_group.yml
index 55d1af2b5..a01cde294 100644
--- a/roles/openshift_aws/tasks/remove_scale_group.yml
+++ b/roles/openshift_aws/tasks/remove_scale_group.yml
@@ -1,10 +1,13 @@
---
+# FIGURE OUT HOW TO REMOVE SCALE GROUPS
+# use openshift_aws_current_asgs??
- name: fetch the scale groups
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
+ name: "^{{ item }}$"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'version': openshift_aws_current_version} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ with_items: "{{ openshift_aws_current_asgs if openshift_aws_current_asgs != [] else openshift_aws_asgs_to_remove }}"
register: qasg
- name: remove non-master scale groups
@@ -14,7 +17,7 @@
name: "{{ item.auto_scaling_group_name }}"
when: "'master' not in item.auto_scaling_group_name"
register: asg_results
- with_items: "{{ qasg.results }}"
+ with_items: "{{ qasg | json_query('results[*]') | sum(attribute='results', start=[]) }}"
async: 600
poll: 0
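Because `ec2_asg_facts` now runs once per ASG name, the registered `qasg` holds one result per loop item, each with its own `results` list; the `json_query('results[*]') | sum(attribute='results', start=[])` expression flattens those nested lists into a single list of scale groups before the non-master ones are removed. A small Python sketch of the same flattening, using hypothetical data:

```python
# Hypothetical shape of the registered 'qasg' variable after the looped
# ec2_asg_facts call; the flattening matches the Jinja expression used above.
qasg = {'results': [
    {'item': 'default compute group 3',
     'results': [{'auto_scaling_group_name': 'default compute group 3'}]},
    {'item': 'default infra group 3',
     'results': [{'auto_scaling_group_name': 'default infra group 3'}]},
    {'item': 'default master group 3',
     'results': [{'auto_scaling_group_name': 'default master group 3'}]},
]}

flat = sum((per_item['results'] for per_item in qasg['results']), [])
non_master = [asg['auto_scaling_group_name'] for asg in flat
              if 'master' not in asg['auto_scaling_group_name']]
print(non_master)   # ['default compute group 3', 'default infra group 3']
```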
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
index 9cf37c840..ba70bcff6 100644
--- a/roles/openshift_aws/tasks/s3.yml
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -1,6 +1,6 @@
---
- name: Create an s3 bucket
- s3:
+ aws_s3:
bucket: "{{ openshift_aws_s3_bucket_name }}"
mode: "{{ openshift_aws_s3_mode }}"
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 30df7545d..3632f7ce9 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -1,20 +1,30 @@
---
+- name: set node group name
+ set_fact:
+ l_node_group_name: "{{ openshift_aws_node_group.name }} {{ l_deployment_serial }}"
+
- name: Create the scale group
ec2_asg:
- name: "{{ openshift_aws_scale_group_basename }} {{ item.key }}"
- launch_config_name: "{{ openshift_aws_launch_config_basename }}-{{ item.key }}{{ '-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- health_check_period: "{{ item.value.health_check.period }}"
- health_check_type: "{{ item.value.health_check.type }}"
- min_size: "{{ item.value.min_size }}"
- max_size: "{{ item.value.max_size }}"
- desired_capacity: "{{ item.value.desired_size }}"
+ name: "{{ l_node_group_name }}"
+ launch_config_name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ health_check_period: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.period }}"
+ health_check_type: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.type }}"
+ min_size: "{{ l_node_group_config[openshift_aws_node_group.group].min_size }}"
+ max_size: "{{ l_node_group_config[openshift_aws_node_group.group].max_size }}"
+ desired_capacity: "{{ l_node_group_config[openshift_aws_node_group.group].desired_size }}"
region: "{{ openshift_aws_region }}"
- termination_policies: "{{ item.value.termination_policy if 'termination_policy' in item.value else omit }}"
- load_balancers: "{{ item.value.elbs if 'elbs' in item.value else omit }}"
- wait_for_instances: "{{ item.value.wait_for_instances | default(False)}}"
+ termination_policies: "{{ l_node_group_config[openshift_aws_node_group.group].termination_policy if 'termination_policy' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ load_balancers: "{{ l_node_group_config[openshift_aws_node_group.group].elbs if 'elbs' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ wait_for_instances: "{{ l_node_group_config[openshift_aws_node_group.group].wait_for_instances | default(False)}}"
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}"
- replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (item.value.replace_all_instances | default(omit)) }}"
+ replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != []
+ else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}"
tags:
- - "{{ openshift_aws_node_group_config_tags | combine(item.value.tags) }}"
- with_dict: "{{ l_nodes_to_build }}"
+ - "{{ openshift_aws_node_group_config_tags
+ | combine(l_node_group_config[openshift_aws_node_group.group].tags)
+ | combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}"
+
+- name: append the asg name to the openshift_aws_created_asgs fact
+ set_fact:
+ openshift_aws_created_asgs: "{{ [l_node_group_name] | union(openshift_aws_created_asgs) | list }}"
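With the old `*_basename` variables gone, launch configuration and scale group names are now derived from the node group definition itself: the launch configuration name embeds the AMI id and an epoch timestamp, while the scale group name carries the deployment serial. A quick sketch of the resulting names, using hypothetical values:

```python
# Hypothetical inputs; the templates above are
#   launch config: "<node group name>-<ami>-<epoch>"
#   scale group:   "<node group name> <deployment serial>"
node_group_name = "default compute group"
ami = "ami-0a1b2c3d"
epoch = "1512345678"
deployment_serial = 2

launch_config_name = "{}-{}-{}".format(node_group_name, ami, epoch)
scale_group_name = "{} {}".format(node_group_name, deployment_serial)

print(launch_config_name)  # default compute group-ami-0a1b2c3d-1512345678
print(scale_group_name)    # default compute group 2
```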
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 7a3d0fb68..74877d5c7 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,6 +1,6 @@
---
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
@@ -12,7 +12,7 @@
- name: bundle ami
ec2_ami:
- instance_id: "{{ instancesout.instances.0.id }}"
+ instance_id: "{{ instancesout.instances.0.instance_id }}"
region: "{{ openshift_aws_region }}"
state: present
description: "This was provisioned {{ ansible_date_time.iso8601 }}"
@@ -46,4 +46,4 @@
ec2:
state: absent
region: "{{ openshift_aws_region }}"
- instance_ids: "{{ instancesout.instances.0.id }}"
+ instance_ids: "{{ instancesout.instances.0.instance_id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 43834079e..0568a0190 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -6,11 +6,27 @@
"tag:Name": "{{ openshift_aws_clusterid }}"
register: vpcout
-- include_tasks: security_group_create.yml
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups }}"
+- name: create the node group sgs
+ oo_ec2_group:
+ name: "{{ item.value.name}}"
+ description: "{{ item.value.desc }}"
+ rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: create the k8s sgs for the node group
+ oo_ec2_group:
+ name: "{{ item.value.name }}_k8s"
+ description: "{{ item.value.desc }} for k8s"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+ register: k8s_sg_create
-- include_tasks: security_group_create.yml
- when: openshift_aws_node_security_groups_extra is defined
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}"
+- name: tag sg groups with proper tags
+ ec2_tag:
+ tags: "{{ openshift_aws_security_groups_tags }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml
deleted file mode 100644
index ef6060555..000000000
--- a/roles/openshift_aws/tasks/security_group_create.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: create the node group sgs
- ec2_group:
- name: "{{ item.value.name}}"
- description: "{{ item.value.desc }}"
- rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
-
-- name: create the k8s sgs for the node group
- ec2_group:
- name: "{{ item.value.name }}_k8s"
- description: "{{ item.value.desc }} for k8s"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
- register: k8s_sg_create
-
-- name: tag sg groups with proper tags
- ec2_tag:
- tags: "{{ openshift_aws_security_groups_tags }}"
- resource: "{{ item.group_id }}"
- region: "{{ openshift_aws_region }}"
- with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml
index 05b68f460..700917ef4 100644
--- a/roles/openshift_aws/tasks/setup_master_group.yml
+++ b/roles/openshift_aws/tasks/setup_master_group.yml
@@ -8,7 +8,7 @@
msg: "openshift_aws_region={{ openshift_aws_region }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
@@ -19,11 +19,13 @@
delay: 3
until: instancesout.instances|length > 0
+- debug: var=instancesout
+
- name: add new master to masters group
add_host:
groups: "{{ openshift_aws_masters_groups }}"
name: "{{ item.public_dns_name }}"
- hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}"
+ hostname: "{{ openshift_aws_clusterid }}-master-{{ item.instance_id[:-5] }}"
with_items: "{{ instancesout.instances }}"
- name: wait for ssh to become available
diff --git a/roles/openshift_aws/tasks/setup_scale_group_facts.yml b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
index d65fdc2de..14c5246c9 100644
--- a/roles/openshift_aws/tasks/setup_scale_group_facts.yml
+++ b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
@@ -1,11 +1,15 @@
---
-- name: group scale group nodes
- ec2_remote_facts:
+- name: fetch all created instances
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
- "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid }}}"
+ "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
+ 'instance-state-name': 'running'} }}"
register: qinstances
+# Building the new and current node groups depends on having lists of the current
+# and newly created ASGs in the variables openshift_aws_created_asgs and
+# openshift_aws_current_asgs. If these do not exist, we cannot determine which
+# hosts are new and which hosts are current.
- name: Build new node group
add_host:
groups: oo_sg_new_nodes
@@ -13,10 +17,16 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default(False)) == openshift_aws_new_version
+ - openshift_aws_created_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_created_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
+- name: dump openshift_aws_current_asgs
+ debug:
+ msg: "{{ openshift_aws_current_asgs }}"
+
- name: Build current node group
add_host:
groups: oo_sg_current_nodes
@@ -24,7 +34,9 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default('')) == openshift_aws_current_version
+ - openshift_aws_current_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_current_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
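The grouping logic in the two add_host tasks above reduces to a set-membership test on each instance's aws:autoscaling:groupName tag. A hedged Python sketch of the same decision, with invented instance data:

```
# Sketch of the new-vs-current split performed by the add_host tasks above.
# The instance dicts mimic ec2_instance_facts output; all values are invented.
created_asgs = ["mycluster openshift compute-ami-new-1512345678"]
current_asgs = ["mycluster openshift compute-ami-old-1500000000"]

instances = [
    {"public_dns_name": "ec2-203-0-113-10.compute-1.amazonaws.com",
     "tags": {"host-type": "node",
              "aws:autoscaling:groupName": created_asgs[0]}},
    {"public_dns_name": "ec2-203-0-113-11.compute-1.amazonaws.com",
     "tags": {"host-type": "node",
              "aws:autoscaling:groupName": current_asgs[0]}},
]

new_nodes, current_nodes = [], []
for inst in instances:
    tags = inst["tags"]
    if "node" not in tags.get("host-type", ""):
        continue
    asg = tags.get("aws:autoscaling:groupName")
    if created_asgs and asg in created_asgs:
        new_nodes.append(inst["public_dns_name"])       # -> oo_sg_new_nodes
    if current_asgs and asg in current_asgs:
        current_nodes.append(inst["public_dns_name"])   # -> oo_sg_current_nodes

print(new_nodes)
print(current_nodes)
```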
diff --git a/roles/openshift_aws/tasks/upgrade_node_group.yml b/roles/openshift_aws/tasks/upgrade_node_group.yml
index c3f86f523..4f4730dd6 100644
--- a/roles/openshift_aws/tasks/upgrade_node_group.yml
+++ b/roles/openshift_aws/tasks/upgrade_node_group.yml
@@ -1,12 +1,22 @@
---
-- fail:
- msg: 'Please ensure the current_version and new_version variables are not the same.'
+- include_tasks: provision_nodes.yml
+ vars:
+ openshift_aws_node_group_upgrade: True
when:
- - openshift_aws_current_version == openshift_aws_new_version
+ - openshift_aws_upgrade_provision_nodes | default(True)
-- include_tasks: provision_nodes.yml
+- debug: var=openshift_aws_current_asgs
+- debug: var=openshift_aws_created_asgs
+
+- name: fail if asg variables aren't set
+ fail:
+ msg: "Please ensure that openshift_aws_created_asgs and openshift_aws_current_asgs are defined."
+ when:
+ - openshift_aws_created_asgs == []
+ - openshift_aws_current_asgs == []
- include_tasks: accept_nodes.yml
+ when: openshift_aws_upgrade_accept_nodes | default(True)
- include_tasks: setup_scale_group_facts.yml
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 9f1a68a2a..1f4ef3e1c 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -1,31 +1,41 @@
---
# The idea here is to wait until all scale groups are at
# their desired capacity before continuing.
-- name: fetch the scale groups
+# This is accomplished with a custom filter_plugin and an until clause
+- name: "fetch the scale groups"
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
register: qasg
- until: qasg.results | scale_groups_match_capacity | bool
+ until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
delay: 10
retries: 60
+- debug: var=openshift_aws_created_asgs
+
+# how do we guarantee the instances are up?
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'tag:version': openshift_aws_new_version} }}"
+ 'tag:aws:autoscaling:groupName': item,
+ 'instance-state-name': 'running'} }}"
+ with_items: "{{ openshift_aws_created_asgs if openshift_aws_created_asgs != [] else qasg | sum(attribute='results', start=[]) }}"
register: instancesout
until: instancesout.instances|length > 0
delay: 5
retries: 60
+- name: dump instances
+ debug:
+ msg: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
+
- name: wait for ssh to become available
wait_for:
port: 22
host: "{{ item.public_ip_address }}"
timeout: 300
search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
+ with_items: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
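The until clause in this file leans on a custom scale_groups_match_capacity filter shipped in the role's filter_plugins; the implementation itself is not part of this hunk. A minimal sketch of how such a filter plugin could be written, assuming each element of the filtered list carries desired_capacity and instances as ec2_asg_facts reports them:

```
# filter_plugins sketch -- illustrative only, not the role's actual code.
# Assumes each scale group dict has 'desired_capacity' and 'instances' keys,
# as ec2_asg_facts reports them.


class FilterModule(object):
    """Filters used from an `until` clause to wait on ASG capacity."""

    @staticmethod
    def scale_groups_match_capacity(scale_groups):
        """Return True once every scale group has at least as many
        instances as its desired capacity."""
        for group in scale_groups:
            desired = group.get("desired_capacity", 0)
            instances = group.get("instances", [])
            if len(instances) < desired:
                return False
        return True

    def filters(self):
        return {"scale_groups_match_capacity": self.scale_groups_match_capacity}
```

Retried with delay: 10 and retries: 60, the task gives the scale groups up to ten minutes to reach capacity before the play fails.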
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index fe0fe83d4..bda1334cd 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -7,8 +7,8 @@ write_files:
owner: 'root:root'
permissions: '0640'
content: |
- openshift_group_type: {{ launch_config_item.key }}
-{% if launch_config_item.key != 'master' %}
+ openshift_group_type: {{ openshift_aws_node_group.group }}
+{% if openshift_aws_node_group.group != 'master' %}
- path: /etc/origin/node/bootstrap.kubeconfig
owner: 'root:root'
permissions: '0640'
@@ -19,7 +19,7 @@ runcmd:
{% if openshift_aws_node_run_bootstrap_startup %}
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
{% endif %}
-{% if launch_config_item.key != 'master' %}
+{% if openshift_aws_node_group.group != 'master' %}
- [ systemctl, restart, NetworkManager]
- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index f8b784a63..81b49ce60 100644
--- a/roles/openshift_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: openshift_cli
+- role: openshift_facts
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 520c00340..a10ba9310 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -458,7 +458,6 @@ def set_url_facts_if_unset(facts):
etcd_urls = []
if etcd_hosts != '':
facts['master']['etcd_port'] = ports['etcd']
- facts['master']['embedded_etcd'] = False
for host in etcd_hosts:
etcd_urls.append(format_url(use_ssl['etcd'], host,
ports['etcd']))
@@ -1595,7 +1594,7 @@ class OpenShiftFacts(object):
console_port='8443', etcd_use_ssl=True,
etcd_hosts='', etcd_port='4001',
portal_net='172.30.0.0/16',
- embedded_etcd=True, embedded_kube=True,
+ embedded_kube=True,
embedded_dns=True,
bind_addr='0.0.0.0',
session_max_seconds=3600,
diff --git a/roles/openshift_health_checker/HOWTO_CHECKS.md b/roles/openshift_health_checker/HOWTO_CHECKS.md
index 6c5662a4e..94961f2d4 100644
--- a/roles/openshift_health_checker/HOWTO_CHECKS.md
+++ b/roles/openshift_health_checker/HOWTO_CHECKS.md
@@ -12,7 +12,7 @@ Checks are typically implemented as two parts:
The checks are called from Ansible playbooks via the `openshift_health_check`
action plugin. See
-[playbooks/byo/openshift-preflight/check.yml](../../playbooks/byo/openshift-preflight/check.yml)
+[playbooks/openshift-checks/pre-install.yml](../../playbooks/openshift-checks/pre-install.yml)
for an example.
The action plugin dynamically discovers all checks and executes only those
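For context, a check discovered by that plugin is ordinarily a small Python class under openshift_checks/. A rough sketch of the shape such a check might take; the exact base-class API should be confirmed against openshift_checks/__init__.py, and the check below is purely illustrative:

```
# openshift_checks/example_disk_space.py -- illustrative sketch of a check.
# The real base class lives in openshift_checks/__init__.py; method names
# and return conventions here are assumptions made for illustration.
from openshift_checks import OpenShiftCheck


class ExampleDiskSpace(OpenShiftCheck):
    """Example: report whether a host looks low on disk."""

    name = "example_disk_space"      # how playbooks/inventories refer to it
    tags = ["preflight"]             # lets tag-based selection pick it up

    def run(self):
        # A real check would inspect gathered facts or run modules and
        # return a dict whose 'failed'/'msg' keys describe the outcome.
        return {"failed": False, "msg": "disk space looks fine"}
```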
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index de302c740..429f0c514 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -126,7 +126,7 @@
selector: "{{ openshift_hosted_registry_selector }}"
replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}"
service_account: "{{ openshift_hosted_registry_serviceaccount }}"
- images: "{{ penshift_hosted_registry_registryurl }}"
+ images: "{{ openshift_hosted_registry_registryurl }}"
env_vars: "{{ openshift_hosted_registry_env_vars }}"
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 0bfa9e85b..bf04094a3 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -9,6 +9,7 @@ metadata:
logging-infra: "{{logging_component}}"
spec:
replicas: {{es_replicas|default(1)}}
+ revisionHistoryLimit: 0
selector:
provider: openshift
component: "{{component}}"
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 96de82669..974d9781a 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -164,14 +164,14 @@ away.
If you want to install CFME/MIQ at the same time you install your
OCP/Origin cluster, ensure that `openshift_management_install_management` is set
to `true` in your inventory. Call the standard
-`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ
+`playbooks/deploy_cluster.yml` playbook to begin the cluster and CFME/MIQ
installation.
If you are installing CFME/MIQ on an *already provisioned cluster*
then you can call the CFME/MIQ playbook directly:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/config.yml
```
*Note: Use `miq-template` in the following examples for ManageIQ installs*
@@ -489,7 +489,7 @@ This playbook will:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/add_container_provider.yml
```
## Multiple Providers
@@ -567,7 +567,7 @@ the config file path.
```
$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
- playbooks/byo/openshift-management/add_many_container_providers.yml
+ playbooks/openshift-management/add_many_container_providers.yml
```
Afterwards you will find two new container providers in your
@@ -579,7 +579,7 @@ to see an overview.
This role includes a playbook to uninstall and erase the CFME/MIQ
installation:
-* `playbooks/byo/openshift-management/uninstall.yml`
+* `playbooks/openshift-management/uninstall.yml`
NFS export definitions and data stored on NFS exports are not
automatically removed. You are urged to manually erase any data from
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index e768961ce..b5e234b7f 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -88,7 +88,7 @@ openshift_management_storage_nfs_local_hostname: false
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
openshift_management_username: admin
openshift_management_password: smartvm
diff --git a/roles/openshift_management/meta/main.yml b/roles/openshift_management/meta/main.yml
index 07ad51126..9f19704a8 100644
--- a/roles/openshift_management/meta/main.yml
+++ b/roles/openshift_management/meta/main.yml
@@ -16,3 +16,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 92668b227..f1a76e5f5 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -69,29 +69,13 @@ dnsConfig:
bindNetwork: tcp4
{% endif %}
etcdClientInfo:
- ca: {{ "ca-bundle.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
+ ca: master.etcd-ca.crt
certFile: master.etcd-client.crt
keyFile: master.etcd-client.key
urls:
{% for etcd_url in openshift.master.etcd_urls %}
- {{ etcd_url }}
{% endfor %}
-{% if openshift.master.embedded_etcd | bool %}
-etcdConfig:
- address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }}
- peerAddress: {{ openshift.common.hostname }}:7001
- peerServingInfo:
- bindAddress: {{ openshift.master.bind_addr }}:7001
- certFile: etcd.server.crt
- clientCA: ca-bundle.crt
- keyFile: etcd.server.key
- servingInfo:
- bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }}
- certFile: etcd.server.crt
- clientCA: ca-bundle.crt
- keyFile: etcd.server.key
- storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd
-{% endif %}
etcdStorageConfig:
kubernetesStoragePrefix: kubernetes.io
kubernetesStorageVersion: v1
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 0cb87dcaa..418dcba67 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -45,7 +45,6 @@
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}"
- embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 929b76f54..65a647b8f 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -44,6 +44,9 @@ openshift_openstack_container_storage_setup:
# populate-dns
openshift_openstack_dns_records_add: []
+openshift_openstack_public_hostname_suffix: ""
+openshift_openstack_private_hostname_suffix: ""
+openshift_openstack_public_dns_domain: "example.com"
openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' + openshift_openstack_public_dns_domain) }}"
openshift_openstack_app_subdomain: "apps"
diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml
index eae4967f7..cf2ead5c3 100644
--- a/roles/openshift_openstack/tasks/populate-dns.yml
+++ b/roles/openshift_openstack/tasks/populate-dns.yml
@@ -1,7 +1,7 @@
---
- name: "Generate list of private A records"
set_fact:
- private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}"
+ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix, 'ip': hostvars[item]['private_v4'] } ] }}"
with_items: "{{ groups['cluster_hosts'] }}"
- name: "Add wildcard records to the private A records for infrahosts"
@@ -48,7 +48,7 @@
- name: "Generate list of public A records"
set_fact:
- public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}"
+ public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix, 'ip': hostvars[item]['public_v4'] } ] }}"
with_items: "{{ groups['cluster_hosts'] }}"
when: hostvars[item]['public_v4'] is defined
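The two suffix variables introduced in defaults/main.yml are simply appended to each generated hostname. A quick Python illustration with invented host data (the private suffix value is an assumption, not a default):

```
# Illustration of how the optional hostname suffixes change the DNS records.
# Host data and the private suffix value below are invented for the example.
private_suffix = "-int"   # openshift_openstack_private_hostname_suffix (assumed)
public_suffix = ""        # openshift_openstack_public_hostname_suffix (default)

hosts = [
    {"ansible_hostname": "app-node-0",
     "private_v4": "192.168.0.10",
     "public_v4": "203.0.113.10"},
]

private_records = [
    {"type": "A",
     "hostname": h["ansible_hostname"] + private_suffix,
     "ip": h["private_v4"]}
    for h in hosts
]
public_records = [
    {"type": "A",
     "hostname": h["ansible_hostname"] + public_suffix,
     "ip": h["public_v4"]}
    for h in hosts
    if "public_v4" in h
]

print(private_records)   # hostname becomes "app-node-0-int"
print(public_records)    # hostname stays "app-node-0"
```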
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 30e83e79b..0c2fcb2c5 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -23,7 +23,7 @@
state: absent
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ groups.all }}"
- when: glusterfs_wipe
+ when: "'openshift' in hostvars[item] and glusterfs_wipe"
- name: Delete pre-existing GlusterFS config
file:
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 2d317700a..d0ad4b7d2 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -13,5 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
-- role: container_runtime
- role: openshift_facts
diff --git a/setup.py b/setup.py
index 4a25d0bce..5ba050b83 100644
--- a/setup.py
+++ b/setup.py
@@ -345,35 +345,29 @@ class OpenShiftAnsibleSyntaxCheck(Command):
print('-' * 60)
print('Syntax checking playbook: {}'.format(playbook))
- # Error on any entry points in 'common'
- if 'common' in playbook:
- print('{}Invalid entry point playbook. All playbooks must'
- ' start in playbooks/byo{}'.format(self.FAIL, self.ENDC))
- has_errors = True
# --syntax-check each entry point playbook
- else:
- try:
- # Create a host group list to avoid WARNING on unmatched host patterns
- host_group_list = [
- 'etcd,masters,nodes,OSEv3',
- 'oo_all_hosts',
- 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,'
- 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate',
- 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes',
- 'oo_nodes_to_config,oo_nodes_to_upgrade',
- 'oo_nodes_use_kuryr,oo_nodes_use_flannel',
- 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv',
- 'oo_lb_to_config',
- 'oo_nfs_to_config',
- 'glusterfs,glusterfs_registry,']
- subprocess.check_output(
- ['ansible-playbook', '-i ' + ','.join(host_group_list),
- '--syntax-check', playbook]
- )
- except subprocess.CalledProcessError as cpe:
- print('{}Execution failed: {}{}'.format(
- self.FAIL, cpe, self.ENDC))
- has_errors = True
+ try:
+ # Create a host group list to avoid WARNING on unmatched host patterns
+ host_group_list = [
+ 'etcd,masters,nodes,OSEv3',
+ 'oo_all_hosts',
+ 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,'
+ 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate',
+ 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes',
+ 'oo_nodes_to_config,oo_nodes_to_upgrade',
+ 'oo_nodes_use_kuryr,oo_nodes_use_flannel',
+ 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv',
+ 'oo_lb_to_config',
+ 'oo_nfs_to_config',
+ 'glusterfs,glusterfs_registry,']
+ subprocess.check_output(
+ ['ansible-playbook', '-i ' + ','.join(host_group_list),
+ '--syntax-check', playbook]
+ )
+ except subprocess.CalledProcessError as cpe:
+ print('{}Execution failed: {}{}'.format(
+ self.FAIL, cpe, self.ENDC))
+ has_errors = True
if has_errors:
raise SystemExit(1)
diff --git a/test/integration/build-images.sh b/test/integration/build-images.sh
index 74a55fa51..74a55fa51 100755..100644
--- a/test/integration/build-images.sh
+++ b/test/integration/build-images.sh
diff --git a/test/integration/run-tests.sh b/test/integration/run-tests.sh
index 680b64602..680b64602 100755..100644
--- a/test/integration/run-tests.sh
+++ b/test/integration/run-tests.sh
diff --git a/tox.ini b/tox.ini
index 46738cae5..9c35915d5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,6 @@ minversion=2.3.1
envlist =
py{27,35}-{flake8,pylint,unit}
py27-{yamllint,ansible_syntax,generate_validation}
- integration
skipsdist=True
skip_missing_interpreters=True
@@ -14,7 +13,6 @@ deps =
-rtest-requirements.txt
unit: -eutils
py35-flake8: flake8-bugbear==17.3.0
- integration: docker-py==1.10.6
commands =
unit: pytest {posargs}
@@ -23,4 +21,3 @@ commands =
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
ansible_syntax: python setup.py ansible_syntax
- integration: python -c 'print("run test/integration/run-tests.sh")'