Diffstat (limited to 'roles')
-rw-r--r--  roles/calico/tasks/main.yml | 1
-rw-r--r--  roles/container_runtime/README.md | 25
-rw-r--r--  roles/container_runtime/defaults/main.yml | 57
-rw-r--r--  roles/container_runtime/tasks/common/atomic_proxy.yml (renamed from roles/openshift_atomic/tasks/proxy.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/common/post.yml | 26
-rw-r--r--  roles/container_runtime/tasks/common/pre.yml | 12
-rw-r--r--  roles/container_runtime/tasks/common/setup_docker_symlink.yml | 38
-rw-r--r--  roles/container_runtime/tasks/common/syscontainer_packages.yml | 28
-rw-r--r--  roles/container_runtime/tasks/common/udev_workaround.yml (renamed from roles/container_runtime/tasks/udev_workaround.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/docker_sanity.yml | 27
-rw-r--r--  roles/container_runtime/tasks/docker_upgrade_check.yml | 67
-rw-r--r--  roles/container_runtime/tasks/main.yml | 85
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 36
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_crio.yml | 90
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_docker.yml | 78
-rw-r--r--  roles/container_runtime/templates/daemon.json | 4
-rw-r--r--  roles/contiv/meta/main.yml | 2
-rw-r--r--  roles/contiv/tasks/default_network.yml | 13
-rw-r--r--  roles/contiv/tasks/netmaster_iptables.yml | 8
-rw-r--r--  roles/contiv_facts/tasks/rpm.yml | 9
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/system_container.yml | 5
-rw-r--r--  roles/flannel/defaults/main.yaml | 6
-rw-r--r--  roles/flannel/handlers/main.yml | 2
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 2
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 2
-rw-r--r--  roles/kuryr/tasks/node.yaml | 4
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 1
-rw-r--r--  roles/lib_utils/library/oo_ec2_group.py | 903
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 10
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 2
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_atomic/README.md | 28
-rw-r--r--  roles/openshift_atomic/meta/main.yml | 13
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 41
-rw-r--r--  roles/openshift_aws/filter_plugins/openshift_aws_filters.py | 35
-rw-r--r--  roles/openshift_aws/tasks/accept_nodes.yml | 11
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 37
-rw-r--r--  roles/openshift_aws/tasks/iam_role.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 37
-rw-r--r--  roles/openshift_aws/tasks/launch_config_create.yml | 26
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 19
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 4
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 25
-rw-r--r--  roles/openshift_aws/tasks/remove_scale_group.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/s3.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 36
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml | 30
-rw-r--r--  roles/openshift_aws/tasks/security_group_create.yml | 25
-rw-r--r--  roles/openshift_aws/tasks/setup_master_group.yml | 6
-rw-r--r--  roles/openshift_aws/tasks/setup_scale_group_facts.yml | 22
-rw-r--r--  roles/openshift_aws/tasks/upgrade_node_group.yml | 24
-rw-r--r--  roles/openshift_aws/tasks/wait_for_groups.yml | 22
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 6
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml | 5
-rw-r--r--  roles/openshift_ca/meta/main.yml | 1
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py | 7
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 2
-rw-r--r--  roles/openshift_cluster_autoscaler/tasks/main.yml | 2
-rw-r--r--  roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
l---------  roles/openshift_examples/files/examples/latest | 1
l---------  roles/openshift_examples/files/examples/v3.7/v3.8 | 1
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json | 3
l---------  roles/openshift_examples/files/examples/v3.9/latest | 1
-rw-r--r--  roles/openshift_excluder/README.md | 5
-rw-r--r--  roles/openshift_excluder/defaults/main.yml | 2
-rw-r--r--  roles/openshift_excluder/meta/main.yml | 1
-rw-r--r--  roles/openshift_excluder/tasks/main.yml | 5
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 100
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 491
-rw-r--r--  roles/openshift_health_checker/HOWTO_CHECKS.md | 2
-rw-r--r--  roles/openshift_health_checker/defaults/main.yml | 6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 4
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py | 5
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 9
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 7
-rw-r--r--  roles/openshift_hosted/README.md | 7
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 11
-rw-r--r--  roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py | 2
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml | 41
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 18
-rw-r--r--  roles/openshift_hosted/tasks/secure.yml | 14
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml | 10
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/storage/object_storage.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/storage/s3.yml | 4
-rw-r--r--  roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_hosted_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 19
-rw-r--r--  roles/openshift_hosted_metrics/README.md | 54
-rw-r--r--  roles/openshift_hosted_metrics/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted_metrics/handlers/main.yml | 31
-rw-r--r--  roles/openshift_hosted_metrics/meta/main.yaml | 18
-rw-r--r--  roles/openshift_hosted_metrics/tasks/install.yml | 132
-rw-r--r--  roles/openshift_hosted_metrics/tasks/main.yaml | 75
-rw-r--r--  roles/openshift_hosted_metrics/vars/main.yaml | 21
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 | 2
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 10
-rw-r--r--  roles/openshift_logging/handlers/main.yml | 8
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 18
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_curator/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 1
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_fluentd/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_kibana/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_mux/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_management/README.md | 10
-rw-r--r--  roles/openshift_management/defaults/main.yml | 2
-rw-r--r--  roles/openshift_management/meta/main.yml | 1
-rw-r--r--  roles/openshift_management/tasks/main.yml | 12
-rw-r--r--  roles/openshift_management/tasks/storage/storage.yml | 2
-rw-r--r--  roles/openshift_master/handlers/main.yml | 6
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 67
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 10
-rw-r--r--  roles/openshift_master/tasks/restart.yml | 4
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 10
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 47
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 12
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 16
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 16
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 22
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 | 6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 | 8
-rw-r--r--  roles/openshift_master_cluster/README.md | 34
-rw-r--r--  roles/openshift_master_cluster/meta/main.yml | 15
-rw-r--r--  roles/openshift_master_cluster/tasks/configure.yml | 43
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 14
-rw-r--r--  roles/openshift_master_facts/defaults/main.yml | 1
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 26
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 12
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 8
-rw-r--r--  roles/openshift_nfs/tasks/setup.yml | 3
-rw-r--r--  roles/openshift_node/README.md | 6
-rw-r--r--  roles/openshift_node/defaults/main.yml | 6
-rw-r--r--  roles/openshift_node/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/aws.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config.yml | 8
-rw-r--r--  roles/openshift_node/tasks/config/configure-node-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/configure-proxy-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml | 2
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml | 17
-rw-r--r--  roles/openshift_node/tasks/install.yml | 4
-rw-r--r--  roles/openshift_node/tasks/main.yml | 4
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 8
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 5
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 8
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 14
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 8
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 6
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 8
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 22
-rw-r--r--  roles/openshift_openstack/defaults/main.yml | 12
-rw-r--r--  roles/openshift_openstack/tasks/check-prerequisites.yml | 8
-rw-r--r--  roles/openshift_openstack/tasks/hostname.yml | 26
-rw-r--r--  roles/openshift_openstack/tasks/node-configuration.yml | 6
-rw-r--r--  roles/openshift_openstack/tasks/populate-dns.yml | 23
-rw-r--r--  roles/openshift_openstack/tasks/provision.yml | 4
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 136
-rw-r--r--  roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py | 157
-rw-r--r--  roles/openshift_persistent_volumes/defaults/main.yml | 9
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml | 3
-rw-r--r--  roles/openshift_persistent_volumes/tasks/main.yml | 57
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pv.yml | 17
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pvc.yml | 17
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 2
-rw-r--r--  roles/openshift_prometheus/README.md | 2
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 3
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 17
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 10
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 3
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 7
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_nfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 20
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 16
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/main.yml | 2
-rw-r--r--  roles/openshift_version/defaults/main.yml | 8
-rw-r--r--  roles/openshift_version/meta/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 8
-rw-r--r--  roles/openshift_version/tasks/set_version_rpm.yml | 6
-rw-r--r--  roles/rhel_subscribe/README.md | 29
-rw-r--r--  roles/rhel_subscribe/defaults/main.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 21
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 31
240 files changed, 2444 insertions, 2222 deletions
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 0e3863304..bbc6edd48 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -14,7 +14,6 @@
vars:
etcd_cert_prefix: calico.etcd-
etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md
index e363c1714..51f469aaf 100644
--- a/roles/container_runtime/README.md
+++ b/roles/container_runtime/README.md
@@ -1,18 +1,23 @@
-Docker
+Container Runtime
=========
Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
-Requirements
-------------
+This role is designed to be used with include_role and tasks_from.
-Ansible 2.2
+Entry points
+------------
+* package_docker.yml - install and setup docker container runtime.
+* systemcontainer_docker.yml - utilize docker + systemcontainer
+* systemcontainer_crio.yml - utilize crio + systemcontainer
+* registry_auth.yml - place docker login credentials.
-Mandator Role Variables
---------------
+Requirements
+------------
+Ansible 2.4
Dependencies
@@ -24,9 +29,9 @@ Example Playbook
----------------
- hosts: servers
- roles:
- - role: container_runtime
- docker_udev_workaround: "true"
+ tasks:
+ - include_role: container_runtime
+ tasks_from: package_docker.yml
License
-------
@@ -36,4 +41,4 @@ ASL 2.0
Author Information
------------------
-OpenShift operations, Red Hat, Inc
+Red Hat, Inc
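
For reference, a minimal playbook consuming one of the entry points listed in the new README might look like the sketch below; it assumes the role is resolvable on the Ansible role path and uses the fully qualified include_role argument form (only the entry point name is taken from the README, everything else is illustrative):

    # Illustrative sketch: install the package-based docker runtime
    - hosts: nodes
      tasks:
        - name: Set up docker via the container_runtime role
          include_role:
            name: container_runtime
            tasks_from: package_docker.yml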
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index 62b3e141a..d7eb8663f 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -59,6 +59,7 @@ docker_default_storage_path: /var/lib/docker
# Set local versions of facts that must be in json format for container-daemon.json
# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
l_docker_log_options: "{{ l2_docker_log_options | to_json }}"
+l_docker_log_options_dict: "{{ l2_docker_log_options | oo_list_to_dict | to_json }}"
l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}"
l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}"
l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}"
@@ -71,10 +72,62 @@ docker_no_proxy: "{{ openshift.common.no_proxy | default('') }}"
openshift_use_crio: False
openshift_use_crio_only: False
+l_openshift_image_tag_default: "{{ openshift_release | default('latest') }}"
+l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}"
+# --------------------- #
+# systemcontainers_crio #
+# --------------------- #
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
-l_openshift_image_tag_default: "{{ openshift_release }}"
-l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}"
+
+openshift_crio_image_tag_default: "latest"
+
+l_crt_crio_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}"
+
+l_crt_crio_image_prepend_dict:
+ openshift-enterprise: "registry.access.redhat.com/openshift3"
+ origin: "docker.io/gscrivano"
+
+l_crt_crio_image_dict:
+ Fedora:
+ crio_image_name: "cri-o-fedora"
+ crio_image_tag: "latest"
+ CentOS:
+ crio_image_name: "cri-o-centos"
+ crio_image_tag: "latest"
+ RedHat:
+ crio_image_name: "cri-o"
+ crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}"
+
+l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}"
+l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}"
+l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_tag'] }}"
+
+l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}"
+
+# ----------------------- #
+# systemcontainers_docker #
+# ----------------------- #
+l_crt_docker_image_prepend_dict:
+ Fedora: "registry.fedoraproject.org/f25"
+  CentOS: "docker.io/gscrivano"
+ RedHat: "registry.access.redhat.com/openshift3"
+
+openshift_docker_image_tag_default: "latest"
+l_crt_docker_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}"
+
+l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}"
+l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}"
+
+l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
+l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"
+
+l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
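
To make the new dictionary indirection concrete, the sketch below traces how the final CRI-O image reference would be assembled for an origin deployment on CentOS; the values are only what the defaults above resolve to under that assumption:

    # Illustrative resolution, assuming openshift_deployment_type=origin
    # and ansible_distribution=CentOS:
    #   l_crio_image_prepend -> "docker.io/gscrivano"
    #   l_crio_image_name    -> "cri-o-centos"
    #   l_crio_image_tag     -> "latest"
    #   l_crio_image_default -> "docker.io/gscrivano/cri-o-centos:latest"
    - debug:
        var: l_crio_image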
diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/container_runtime/tasks/common/atomic_proxy.yml
index dde099984..dde099984 100644
--- a/roles/openshift_atomic/tasks/proxy.yml
+++ b/roles/container_runtime/tasks/common/atomic_proxy.yml
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
new file mode 100644
index 000000000..d790eb2c0
--- /dev/null
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -0,0 +1,26 @@
+---
+- name: Ensure /var/lib/containers exists
+ file:
+ path: /var/lib/containers
+ state: directory
+
+- name: Fix SELinux Permissions on /var/lib/containers
+ command: "restorecon -R /var/lib/containers/"
+ changed_when: false
+
+- meta: flush_handlers
+
+# This needs to run after docker is restarted to account for proxy settings.
+# registry_auth is called directly with include_role in some places, so we
+# have to put it in the root of the tasks/ directory.
+- include_tasks: ../registry_auth.yml
+
+- name: stat the docker data dir
+ stat:
+ path: "{{ docker_default_storage_path }}"
+ register: dockerstat
+
+- include_tasks: setup_docker_symlink.yml
+ when:
+ - openshift_use_crio
+ - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
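
Because registry_auth.yml stays at the root of tasks/ so it can be called directly, a caller outside this role would reach it roughly as in the sketch below (it assumes the registry credential variables, e.g. oreg_auth_user and oreg_auth_password, are defined elsewhere in the inventory):

    # Illustrative sketch: place registry credentials without running the full role
    - include_role:
        name: container_runtime
        tasks_from: registry_auth.yml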
diff --git a/roles/container_runtime/tasks/common/pre.yml b/roles/container_runtime/tasks/common/pre.yml
new file mode 100644
index 000000000..990fe66da
--- /dev/null
+++ b/roles/container_runtime/tasks/common/pre.yml
@@ -0,0 +1,12 @@
+---
+- include_tasks: udev_workaround.yml
+ when: docker_udev_workaround | default(False) | bool
+
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+ - not openshift_use_crio_only | bool
diff --git a/roles/container_runtime/tasks/common/setup_docker_symlink.yml b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
new file mode 100644
index 000000000..d7aeb192e
--- /dev/null
+++ b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
@@ -0,0 +1,38 @@
+---
+- block:
+ - name: stop the current running docker
+ systemd:
+ state: stopped
+ name: "{{ openshift_docker_service_name }}"
+
+ - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
+ command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc != 0
+
+ - name: "Set the selinux context on {{ docker_alt_storage_path }}"
+ command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc == 1
+ - "'already exists' not in results.stderr"
+
+ - name: "restorecon the {{ docker_alt_storage_path }}"
+ command: "restorecon -r {{ docker_alt_storage_path }}"
+
+ - name: Remove the old docker location
+ file:
+ state: absent
+ path: "{{ docker_default_storage_path }}"
+
+ - name: Setup the link
+ file:
+ state: link
+ src: "{{ docker_alt_storage_path }}"
+ path: "{{ docker_default_storage_path }}"
+
+ - name: start docker
+ systemd:
+ state: started
+ name: "{{ openshift_docker_service_name }}"
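
The block above relocates the docker data directory and leaves a symlink behind; it relies on docker_default_storage_path (set to /var/lib/docker in defaults/main.yml earlier in this diff) and on a docker_alt_storage_path variable defined elsewhere in the role. A rough sketch of the inputs it expects (the alternate path value is an assumption, not shown in this diff):

    # Assumed inputs for setup_docker_symlink.yml
    docker_default_storage_path: /var/lib/docker         # from defaults/main.yml above
    docker_alt_storage_path: /var/lib/containers/docker  # assumed alternate location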
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
new file mode 100644
index 000000000..715ed492d
--- /dev/null
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -0,0 +1,28 @@
+---
+
+- name: Ensure container-selinux is installed
+ package:
+ name: container-selinux
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# Used to pull and install the system container
+- name: Ensure atomic is installed
+ package:
+ name: atomic
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# At the time of writing the atomic command requires runc for its own use. This
+# task is here in the event that the atomic package ever removes the dependency.
+- name: Ensure runc is installed
+ package:
+ name: runc
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
diff --git a/roles/container_runtime/tasks/udev_workaround.yml b/roles/container_runtime/tasks/common/udev_workaround.yml
index 257c3123d..257c3123d 100644
--- a/roles/container_runtime/tasks/udev_workaround.yml
+++ b/roles/container_runtime/tasks/common/udev_workaround.yml
diff --git a/roles/container_runtime/tasks/docker_sanity.yml b/roles/container_runtime/tasks/docker_sanity.yml
new file mode 100644
index 000000000..e62cf5505
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_sanity.yml
@@ -0,0 +1,27 @@
+---
+# Sanity checks to ensure the role will complete and provide helpful error
+# messages for common problems.
+
+- name: Error out if Docker pre-installed but too old
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+
+- name: Error out if requested Docker is too old
+ fail:
+ msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
+ when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+
+# If a docker_version was requested, sanity check that we can install or upgrade to it, and
+# no downgrade is required.
+- name: Fail if Docker version requested but downgrade is required
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+
+# This involves an extremely slow migration process, users should instead run the
+# Docker 1.10 upgrade playbook to accomplish this.
+- name: Error out if attempting to upgrade Docker across the 1.10 boundary
+ fail:
+ msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
new file mode 100644
index 000000000..f29619f42
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -0,0 +1,67 @@
+---
+
+# This snippet determines if a Docker upgrade is required by checking the inventory
+# variables, the available packages, and sets l_docker_upgrade to True if so.
+
+- set_fact:
+ docker_upgrade: True
+ when: docker_upgrade is not defined
+
+- name: Check if Docker is installed
+ command: rpm -q docker
+ args:
+ warn: no
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Get current version of Docker
+ command: "{{ repoquery_installed }} --qf '%{version}' docker"
+ register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
+ changed_when: false
+
+- name: Get latest available version of Docker
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "docker"
+ register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
+ # Don't expect docker rpm to be available on hosts that don't already have it installed:
+ when: pkg_check.rc == 0
+ failed_when: false
+ changed_when: false
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ # Disable the 1.12 requirement if the user set a specific Docker version
+ when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
+
+# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
+- set_fact:
+ l_docker_upgrade: False
+
+# Make sure a docker_version is set if none was requested:
+- set_fact:
+ docker_version: "{{ avail_docker_version.stdout }}"
+ when: pkg_check.rc == 0 and docker_version is not defined
+
+- name: Flag for Docker upgrade if necessary
+ set_fact:
+ l_docker_upgrade: True
+ when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+
+# Additional checks for Atomic hosts:
+- name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+- set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
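
The file above only sets facts; a rough sketch of how a calling play could consume l_docker_upgrade afterwards (the surrounding play structure is illustrative, not taken from this diff):

    # Illustrative consumer of the facts set by docker_upgrade_check.yml
    - include_role:
        name: container_runtime
        tasks_from: docker_upgrade_check.yml

    - debug:
        msg: "Docker will be upgraded to {{ docker_version }}"
      when: l_docker_upgrade | bool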
diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml
index 6d68082b1..96d8606c6 100644
--- a/roles/container_runtime/tasks/main.yml
+++ b/roles/container_runtime/tasks/main.yml
@@ -1,85 +1,2 @@
---
-- include_tasks: udev_workaround.yml
- when: docker_udev_workaround | default(False) | bool
-
-- name: Add enterprise registry, if necessary
- set_fact:
- l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
- when:
- - openshift.common.deployment_type == 'openshift-enterprise'
- - openshift_docker_ent_reg != ''
- - openshift_docker_ent_reg not in l2_docker_additional_registries
- - not openshift_use_crio_only | bool
-
-- name: Use Package Docker if Requested
- include_tasks: package_docker.yml
- when:
- - not openshift_docker_use_system_container
- - not openshift_use_crio_only
-
-- name: Ensure /var/lib/containers exists
- file:
- path: /var/lib/containers
- state: directory
-
-- name: Fix SELinux Permissions on /var/lib/containers
- command: "restorecon -R /var/lib/containers/"
- changed_when: false
-
-- name: Use System Container Docker if Requested
- include_tasks: systemcontainer_docker.yml
- when:
- - openshift_docker_use_system_container
- - not openshift_use_crio_only
-
-- name: Add CRI-O usage Requested
- include_tasks: systemcontainer_crio.yml
- when:
- - openshift_use_crio
- - openshift_docker_is_node_or_master | bool
-
-- name: stat the docker data dir
- stat:
- path: "{{ docker_default_storage_path }}"
- register: dockerstat
-
-- when:
- - openshift_use_crio
- - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
- block:
- - name: stop the current running docker
- systemd:
- state: stopped
- name: "{{ openshift_docker_service_name }}"
-
- - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
- command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc != 0
-
- - name: "Set the selinux context on {{ docker_alt_storage_path }}"
- command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc == 1
- - "'already exists' not in results.stderr"
-
- - name: "restorecon the {{ docker_alt_storage_path }}"
- command: "restorecon -r {{ docker_alt_storage_path }}"
-
- - name: Remove the old docker location
- file:
- state: absent
- path: "{{ docker_default_storage_path }}"
-
- - name: Setup the link
- file:
- state: link
- src: "{{ docker_alt_storage_path }}"
- path: "{{ docker_default_storage_path }}"
-
- - name: start docker
- systemd:
- state: started
- name: "{{ openshift_docker_service_name }}"
+# This role is meant to be used with include_role and tasks_from.
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index 40ab75a25..89899c9cf 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -1,4 +1,6 @@
---
+- include_tasks: common/pre.yml
+
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
@@ -7,35 +9,16 @@
until: curr_docker_version | succeeded
changed_when: false
-- name: Error out if Docker pre-installed but too old
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
-
-- name: Error out if requested Docker is too old
- fail:
- msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
- when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
-
-# If a docker_version was requested, sanity check that we can install or upgrade to it, and
-# no downgrade is required.
-- name: Fail if Docker version requested but downgrade is required
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
-
-# This involves an extremely slow migration process, users should instead run the
-# Docker 1.10 upgrade playbook to accomplish this.
-- name: Error out if attempting to upgrade Docker across the 1.10 boundary
- fail:
- msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+# Some basic checks to ensure the role will complete
+- include_tasks: docker_sanity.yml
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.
- name: Install Docker
- package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ package:
+ name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
+ state: present
when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
register: result
until: result | success
@@ -161,7 +144,4 @@
- set_fact:
docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-- meta: flush_handlers
-
-# This needs to run after docker is restarted to account for proxy settings.
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index 8dcfe60ef..61f122f3c 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -1,39 +1,14 @@
---
# TODO: Much of this file is shared with container engine tasks
-
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
- name: Check we are not using node as a Docker container with CRI-O
fail: msg='Cannot use CRI-O with node configured as a Docker container'
when:
- openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
-
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# At the time of writing the atomic command requires runc for it's own use. This
-# task is here in the even that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+ - not l_is_node_system_container | bool
+
+- include_tasks: common/pre.yml
+- include_tasks: common/syscontainer_packages.yml
- name: Check that overlay is in the kernel
shell: lsmod | grep overlay
@@ -60,50 +35,11 @@
state: restarted
- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set CRI-O image defaults
- set_fact:
- l_crio_image_prepend: "docker.io/gscrivano"
- l_crio_image_name: "cri-o-fedora"
- l_crio_image_tag: "latest"
-
- - name: Use Centos based image when distribution is CentOS
- set_fact:
- l_crio_image_name: "cri-o-centos"
- when: ansible_distribution == "CentOS"
-
- - name: Set CRI-O image tag
- set_fact:
- l_crio_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use RHEL based image when distribution is Red Hat
- set_fact:
- l_crio_image_prepend: "registry.access.redhat.com/openshift3"
- l_crio_image_name: "cri-o"
- when: ansible_distribution == "RedHat"
-
- - name: Set the full image name
- set_fact:
- l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
-
- # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
- - name: Use a specific image if requested
- set_fact:
- l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}"
- when:
- - openshift_crio_systemcontainer_image_override is defined
- - openshift_crio_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_crio_image
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_crio_image
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull CRI-O System Container image
@@ -112,7 +48,6 @@
environment:
NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
-
- name: Install CRI-O System Container
oc_atomic_container:
name: "cri-o"
@@ -139,8 +74,7 @@
state: directory
- name: setup firewall for CRI-O
- include_tasks: crio_firewall.yml
- static: yes
+ import_tasks: crio_firewall.yml
- name: Configure the CNI network
template:
@@ -155,10 +89,8 @@
daemon_reload: yes
register: start_result
-- meta: flush_handlers
-
# If we are using crio only, docker.service might not be available for
# 'docker login'
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
vars:
openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}"
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
index 84217e50c..10570fe34 100644
--- a/roles/container_runtime/tasks/systemcontainer_docker.yml
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -11,32 +11,9 @@
traditional docker package install. Otherwise, comment out openshift_docker_options
in your inventory file.
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+- include_tasks: common/pre.yml
-# At the time of writing the atomic command requires runc for it's own use. This
-# task is here in the even that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+- include_tasks: common/syscontainer_packages.yml
# Make sure Docker is installed so we are able to use the client
- name: Install Docker so we can use the client
@@ -59,48 +36,11 @@
delay: 30
- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set to default prepend
- set_fact:
- l_docker_image_prepend: "gscrivano"
- l_docker_image_tag: "latest"
-
- - name: Set container engine image tag
- set_fact:
- l_docker_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use Red Hat Registry for image when distribution is Red Hat
- set_fact:
- l_docker_image_prepend: "registry.access.redhat.com/openshift3"
- when: ansible_distribution == 'RedHat'
-
- - name: Use Fedora Registry for image when distribution is Fedora
- set_fact:
- l_docker_image_prepend: "registry.fedoraproject.org/f25"
- when: ansible_distribution == 'Fedora'
-
- - name: Set the full image name
- set_fact:
- l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
-
- # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
- - name: Use a specific image if requested
- set_fact:
- l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
- when:
- - openshift_docker_systemcontainer_image_override is defined
- - openshift_docker_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_docker_image
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_docker_image
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
@@ -154,10 +94,8 @@
- set_fact:
docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
-- meta: flush_handlers
-
# Since docker is running as a system container, docker login will fail to create
# credentials. Use alternate method if requiring authenticated registries.
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
vars:
openshift_docker_alternative_creds: True
diff --git a/roles/container_runtime/templates/daemon.json b/roles/container_runtime/templates/daemon.json
index 383963bd3..1a72d812a 100644
--- a/roles/container_runtime/templates/daemon.json
+++ b/roles/container_runtime/templates/daemon.json
@@ -5,10 +5,10 @@
"disable-legacy-registry": false,
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries": {{ l_docker_insecure_registries }},
-{% if openshift_docker_log_driver is defined %}
+{% if openshift_docker_log_driver %}
"log-driver": "{{ openshift_docker_log_driver }}",
{%- endif %}
- "log-opts": {{ l_docker_log_options }},
+ "log-opts": {{ l_docker_log_options_dict }},
"runtimes": {
"oci": {
"path": "/usr/libexec/docker/docker-runc-current"
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index a2c2f98a7..52b9d09dd 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -21,7 +21,7 @@ dependencies:
etcd_client_port: 22379
etcd_conf_dir: /etc/contiv-etcd/
etcd_data_dir: /var/lib/contiv-etcd/
- etcd_ca_host: "{{ inventory_hostname }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_config_dir: /etc/contiv-etcd/
etcd_url_scheme: http
etcd_peer_url_scheme: http
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index f679443e0..8a928ea54 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -8,51 +8,64 @@
- name: Contiv | Set globals
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+ run_once: true
- name: Contiv | Set arp mode to flood if ACI
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
when: contiv_fabric_mode == "aci"
+ run_once: true
- name: Contiv | Check if default-net exists
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
register: net_result
+ run_once: true
- name: Contiv | Create default-net
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
when: net_result.stdout.find("default-net") == -1
+ run_once: true
- name: Contiv | Create host access infra network for VxLan routing case
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+ run_once: true
#- name: Contiv | Create an allow-all policy for the default-group
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
# when: contiv_fabric_mode == "aci"
+# run_once: true
- name: Contiv | Set up aci external contract to consume default external contract
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
- name: Contiv | Set up aci external contract to provide default external contract
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
- name: Contiv | Create aci default-group
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
when: contiv_fabric_mode == "aci"
+ run_once: true
- name: Contiv | Add external contracts to the default-group
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
#- name: Contiv | Add policy rule 1 for allow-all policy
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
# when: contiv_fabric_mode == "aci"
+# run_once: true
#- name: Contiv | Add policy rule 2 for allow-all policy
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
# when: contiv_fabric_mode == "aci"
+# run_once: true
- name: Contiv | Create default aci app profile
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
when: contiv_fabric_mode == "aci"
+ run_once: true
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
index 07bb16ea7..c98e7b6a5 100644
--- a/roles/contiv/tasks/netmaster_iptables.yml
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -13,9 +13,15 @@
- name: Netmaster IPtables | Open Netmaster with iptables
command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
with_items:
- - "{{ netmaster_port }}"
- "{{ contiv_rpc_port1 }}"
- "{{ contiv_rpc_port2 }}"
- "{{ contiv_rpc_port3 }}"
when: iptablesrules.stdout.find("contiv") == -1
notify: Save iptables rules
+
+- name: Netmaster IPtables | Open netmaster main port
+ command: /sbin/iptables -I INPUT 1 -p tcp -s {{ item }} --dport {{ netmaster_port }} -j ACCEPT -m comment --comment "contiv"
+ with_items:
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + netmaster_interface].ipv4.address)|list }}"
+ when: iptablesrules.stdout.find("contiv") == -1
+ notify: Save iptables rules
diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml
index 07401a6dd..d12436f96 100644
--- a/roles/contiv_facts/tasks/rpm.yml
+++ b/roles/contiv_facts/tasks/rpm.yml
@@ -6,10 +6,17 @@
failed_when: false
check_mode: no
+- name: RPM | Determine if firewalld enabled
+ command: "systemctl status firewalld.service"
+ register: ss
+ changed_when: false
+ failed_when: false
+ check_mode: no
+
- name: Set the has_firewalld fact
set_fact:
has_firewalld: true
- when: s.rc == 0
+ when: s.rc == 0 and ss.rc == 0
- name: Determine if iptables-services installed
command: "rpm -q iptables-services"
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 879ca4f4e..f2e1fc310 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -19,3 +19,4 @@ dependencies:
- role: lib_openshift
- role: lib_os_firewall
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index 4bdc6bcc3..a4b0ff31d 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
- oadm migrate etcd-ttl \
+ {{ openshift.common.client_binary }} adm migrate etcd-ttl \
--cert {{ r_etcd_common_master_peer_cert_file }} \
--key {{ r_etcd_common_master_peer_key_file }} \
--cacert {{ r_etcd_common_master_peer_ca_file }} \
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 82ac4fc84..ca8b6a707 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,9 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 488b6b0bc..2e4a0dc39 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -2,8 +2,8 @@
flannel_interface: "{{ ansible_default_ipv4.interface }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
-etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt"
-etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt"
-etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key"
+etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/flannel.etcd-ca.crt"
+etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.crt"
+etcd_peer_key_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.key"
openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 80e4d391d..705d39f9a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -15,7 +15,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_restart_node_result
until: not l_restart_node_result | failed
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 1d0f5df6a..cd11fd9ff 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -4,6 +4,6 @@ flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
-etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
+etcd_peer_ca_file: "{{ etcd_conf_dir + '/master.etcd-ca.crt' }}"
etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 3cb1fa8d0..83ca83350 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -86,7 +86,7 @@ class CallbackModule(CallbackBase):
},
'installer_phase_logging': {
'title': 'Logging Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml'
+ 'playbook': 'playbooks/openshift-logging/config.yml'
},
'installer_phase_prometheus': {
'title': 'Prometheus Install',
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index ffe814713..08f2d5adc 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -36,7 +36,7 @@
- name: Configure OpenShift node with disabled service proxy
lineinfile:
- dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
@@ -44,5 +44,5 @@
- name: force node restart to disable the proxy
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index 6bf6c1db2..96c215f00 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -229,6 +229,7 @@ data:
# TODO (apuimedo): Remove the duplicated line just after this one once the
# RDO packaging contains the upstream patch
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+ external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }}
[pod_vif_nested]
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
diff --git a/roles/lib_utils/library/oo_ec2_group.py b/roles/lib_utils/library/oo_ec2_group.py
new file mode 100644
index 000000000..615021ac5
--- /dev/null
+++ b/roles/lib_utils/library/oo_ec2_group.py
@@ -0,0 +1,903 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: skip-file
+# flake8: noqa
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+author: "Andrew de Quincey (@adq)"
+version_added: "1.3"
+requirements: [ boto3 ]
+short_description: maintain an ec2 VPC security group.
+description:
+ - Maintains EC2 security groups. This module depends on boto3.
+options:
+ name:
+ description:
+ - Name of the security group.
+ - One of and only one of I(name) or I(group_id) is required.
+ - Required if I(state=present).
+ required: false
+ group_id:
+ description:
+ - Id of group to delete (works only with absent).
+ - One of and only one of I(name) or I(group_id) is required.
+ required: false
+ version_added: "2.4"
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ rules:
+ description:
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+ no inbound rules will be enabled. Rules list may include its own name in `group_name`.
+ This allows idempotent loopback additions (e.g. allow group to access itself).
+ Rule sources list support was added in version 2.4. This allows defining multiple sources per
+ source type as well as multiple source types per rule. Prior to 2.4 only a single source per rule was allowed.
+ required: false
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ Rule Egress sources list support was added in version 2.4.
+ required: false
+ version_added: "1.6"
+ state:
+ version_added: "1.4"
+ description:
+ - Create or delete a security group
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+ aliases: []
+ purge_rules:
+ version_added: "1.8"
+ description:
+ - Purge existing rules on security group that are not found in rules
+ required: false
+ default: 'true'
+ aliases: []
+ purge_rules_egress:
+ version_added: "1.8"
+ description:
+ - Purge existing rules_egress on security group that are not found in rules_egress
+ required: false
+ default: 'true'
+ aliases: []
+ tags:
+ version_added: "2.4"
+ description:
+ - A dictionary of one or more tags to assign to the security group.
+ required: false
+ purge_tags:
+ version_added: "2.4"
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by the I(tags) parameter. If the I(tags) parameter is not set then
+ tags will not be modified.
+ required: false
+ default: yes
+ choices: [ 'yes', 'no' ]
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
+'''
+
+EXAMPLES = '''
+- name: example ec2 group
+ ec2_group:
+ name: example
+ description: an example EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ aws_secret_key: SECRET
+ aws_access_key: ACCESS
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ group_id: amazon-elb/sg-87654321/amazon-elb-sg
+ - proto: tcp
+ from_port: 3306
+ to_port: 3306
+ group_id: 123412341234/sg-87654321/exact-name-of-sg
+ - proto: udp
+ from_port: 10050
+ to_port: 10050
+ cidr_ip: 10.0.0.0/8
+ - proto: udp
+ from_port: 10051
+ to_port: 10051
+ group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+ to_port: -1 # icmp subtype, -1 = any subtype
+ cidr_ip: 10.0.0.0/8
+ - proto: all
+ # the containing group name may be specified here
+ group_name: example
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ cidr_ipv6: 64:ff9b::/96
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
+
+- name: example2 ec2 group
+ ec2_group:
+ name: example2
+ description: an example2 EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ rules:
+ # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
+ - proto: tcp
+ ports: 22
+ group_name: example-vpn
+ - proto: tcp
+ ports:
+ - 80
+ - 443
+ - 8080-8099
+ cidr_ip: 0.0.0.0/0
+ # Rule sources list support was added in version 2.4. This allows defining multiple sources per source type as well as multiple source types per rule.
+ - proto: tcp
+ ports:
+ - 6379
+ - 26379
+ group_name:
+ - example-vpn
+ - example-redis
+ - proto: tcp
+ ports: 5665
+ group_name: example-vpn
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ cidr_ipv6:
+ - 2607:F8B0::/32
+ - 64:ff9b::/96
+ group_id:
+ - sg-edcd9784
+
+- name: "Delete group by its id"
+ ec2_group:
+ group_id: sg-33b4ee5b
+ state: absent
+'''
+
+RETURN = '''
+group_name:
+ description: Security group name
+ sample: My Security Group
+ type: string
+ returned: on create/update
+group_id:
+ description: Security group id
+ sample: sg-abcd1234
+ type: string
+ returned: on create/update
+description:
+ description: Description of security group
+ sample: My Security Group
+ type: string
+ returned: on create/update
+tags:
+ description: Tags associated with the security group
+ sample:
+ Name: My Security Group
+ Purpose: protecting stuff
+ type: dict
+ returned: on create/update
+vpc_id:
+ description: ID of VPC to which the security group belongs
+ sample: vpc-abcd1234
+ type: string
+ returned: on create/update
+ip_permissions:
+ description: Inbound rules associated with the security group.
+ sample:
+ - from_port: 8182
+ ip_protocol: tcp
+ ip_ranges:
+ - cidr_ip: "1.1.1.1/32"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ to_port: 8182
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+ip_permissions_egress:
+ description: Outbound rules associated with the security group.
+ sample:
+ - ip_protocol: -1
+ ip_ranges:
+ - cidr_ip: "0.0.0.0/0"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
+'''
+
+import json
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn
+from ansible.module_utils.ec2 import get_aws_connection_info
+from ansible.module_utils.ec2 import ec2_argument_spec
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.ec2 import HAS_BOTO3
+from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
+from ansible.module_utils.ec2 import AWSRetry
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported HAS_BOTO3
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_security_groups_with_backoff(connection, **kwargs):
+ return connection.describe_security_groups(**kwargs)
+
+
+def deduplicate_rules_args(rules):
+ """Returns unique rules"""
+ if rules is None:
+ return None
+ return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
+
+
+def make_rule_key(prefix, rule, group_id, cidr_ip):
+ if 'proto' in rule:
+ proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
+ elif 'IpProtocol' in rule:
+ proto, from_port, to_port = [rule.get(x, None) for x in ('IpProtocol', 'FromPort', 'ToPort')]
+ if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
+ from_port = 'none'
+ to_port = 'none'
+ key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
+ return key.lower().replace('-none', '-None')
+
+
+def add_rules_to_lookup(ipPermissions, group_id, prefix, dict):
+ for rule in ipPermissions:
+ for groupGrant in rule.get('UserIdGroupPairs', []):
+ dict[make_rule_key(prefix, rule, group_id, groupGrant.get('GroupId'))] = (rule, groupGrant)
+ for ipv4Grants in rule.get('IpRanges', []):
+ dict[make_rule_key(prefix, rule, group_id, ipv4Grants.get('CidrIp'))] = (rule, ipv4Grants)
+ for ipv6Grants in rule.get('Ipv6Ranges', []):
+ dict[make_rule_key(prefix, rule, group_id, ipv6Grants.get('CidrIpv6'))] = (rule, ipv6Grants)
+
+
+def validate_rule(module, rule):
+ VALID_PARAMS = ('cidr_ip', 'cidr_ipv6',
+ 'group_id', 'group_name', 'group_desc',
+ 'proto', 'from_port', 'to_port')
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+ Returns a tuple of (group_id, ip, ipv6, target_group_created) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+ AWS accepts an ip range or a security group as the target of a rule. This
+ function validates the rule specification and returns either a non-None
+ group_id or a non-None ip range.
+ """
+
+ FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
+ group_id = None
+ group_name = None
+ ip = None
+ ipv6 = None
+ target_group_created = False
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ip, not both")
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ip, not both")
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg="Specify group_id OR group_name, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+ # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ elif 'group_id' in rule:
+ group_id = rule['group_id']
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ module.fail_json(msg="group %s will be automatically created by rule %s and "
+ "no description was provided" % (group_name, rule))
+ if not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ auto_group = client.create_security_group(**params)
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ elif 'cidr_ip' in rule:
+ ip = rule['cidr_ip']
+ elif 'cidr_ipv6' in rule:
+ ipv6 = rule['cidr_ipv6']
+
+ return group_id, ip, ipv6, target_group_created
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, str):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((port.strip(),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ if 'ports' not in rule:
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = from_to
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+ # takes a list of rules and expands it based on 'cidr_ip', 'cidr_ipv6', 'group_id', 'group_name'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
+
+
+def authorize_ip(type, changed, client, group, groupRules,
+ ip, ip_permission, module, rule, ethertype):
+ # If rule already exists, don't later delete it
+ for thisip in ip:
+ rule_id = make_rule_key(type, rule, group['GroupId'], thisip)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_ip_grant(rule, thisip, ethertype)
+ if ip_permission:
+ try:
+ if type == "in":
+ client.authorize_security_group_ingress(GroupId=group['GroupId'],
+ IpPermissions=[ip_permission])
+ elif type == "out":
+ client.authorize_security_group_egress(GroupId=group['GroupId'],
+ IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to authorize %s for ip %s security group '%s' - %s" %
+ (type, thisip, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ return changed, ip_permission
+
+
+def serialize_group_grant(group_id, rule):
+ permission = {'IpProtocol': rule['proto'],
+ 'FromPort': rule['from_port'],
+ 'ToPort': rule['to_port'],
+ 'UserIdGroupPairs': [{'GroupId': group_id}]}
+
+ return fix_port_and_protocol(permission)
+
+
+def serialize_revoke(grant, rule):
+ permission = dict()
+ fromPort = rule['FromPort'] if 'FromPort' in rule else None
+ toPort = rule['ToPort'] if 'ToPort' in rule else None
+ if 'GroupId' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'UserIdGroupPairs': [{'GroupId': grant['GroupId']}]
+ }
+ elif 'CidrIp' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'IpRanges': [grant]
+ }
+ elif 'CidrIpv6' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'Ipv6Ranges': [grant]
+ }
+ return fix_port_and_protocol(permission)
+
+
+def serialize_ip_grant(rule, thisip, ethertype):
+ permission = {'IpProtocol': rule['proto'],
+ 'FromPort': rule['from_port'],
+ 'ToPort': rule['to_port']}
+ if ethertype == "ipv4":
+ permission['IpRanges'] = [{'CidrIp': thisip}]
+ elif ethertype == "ipv6":
+ permission['Ipv6Ranges'] = [{'CidrIpv6': thisip}]
+
+ return fix_port_and_protocol(permission)
+
+
+def fix_port_and_protocol(permission):
+ for key in ['FromPort', 'ToPort']:
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = str(permission['IpProtocol'])
+
+ return permission
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ name=dict(),
+ group_id=dict(),
+ description=dict(),
+ vpc_id=dict(),
+ rules=dict(type='list'),
+ rules_egress=dict(type='list'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ purge_rules=dict(default=True, required=False, type='bool'),
+ purge_rules_egress=dict(default=True, required=False, type='bool'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, required=False, type='bool')
+ )
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'group_id']],
+ required_if=[['state', 'present', ['name']]],
+ )
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ name = module.params['name']
+ group_id = module.params['group_id']
+ description = module.params['description']
+ vpc_id = module.params['vpc_id']
+ rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
+ rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
+ state = module.params.get('state')
+ purge_rules = module.params['purge_rules']
+ purge_rules_egress = module.params['purge_rules_egress']
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+
+ if state == 'present' and not description:
+ module.fail_json(msg='Must provide description when state is present.')
+
+ changed = False
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="The AWS region must be specified as an "
+ "environment variable or in the AWS credentials "
+ "profile.")
+ client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params)
+ group = None
+ groups = dict()
+ security_groups = []
+ # get all security groups
+ # and determine whether the requested group is present
+ try:
+ response = get_security_groups_with_backoff(client)
+ security_groups = response.get('SecurityGroups', [])
+ except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials", exception=traceback.format_exc())
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Error in describe_security_groups: %s" % e, exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ for sg in security_groups:
+ groups[sg['GroupId']] = sg
+ groupName = sg['GroupName']
+ if groupName in groups:
+ # Prioritise groups from the current VPC
+ # even if current VPC is EC2-Classic
+ if groups[groupName].get('VpcId') == vpc_id:
+ # Group saved already matches current VPC, change nothing
+ pass
+ elif vpc_id is None and groups[groupName].get('VpcId') is None:
+ # We're in EC2 classic, and the group already saved is as well
+ # No VPC groups can be used alongside EC2 classic groups
+ pass
+ else:
+ # the current SG stored has no direct match, so we can replace it
+ groups[groupName] = sg
+ else:
+ groups[groupName] = sg
+
+ if group_id and sg['GroupId'] == group_id:
+ group = sg
+ elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id):
+ group = sg
+
+ # Ensure requested group is absent
+ if state == 'absent':
+ if group:
+ # found a match, delete it
+ try:
+ if not module.check_mode:
+ client.delete_security_group(GroupId=group['GroupId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ else:
+ group = None
+ changed = True
+ else:
+ # no match found, no changes required
+ pass
+
+ # Ensure requested group is present
+ elif state == 'present':
+ if group:
+ # existing group
+ if group['Description'] != description:
+ module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
+ "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
+
+ # if the group doesn't exist, create it now
+ else:
+ # no match found, create it
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ group = client.create_security_group(**params)
+ # When a group is created, an egress_rule ALLOW ALL
+ # to 0.0.0.0/0 is added automatically but it's not
+ # reflected in the object returned by the AWS API
+ # call. We re-read the group to get an updated object.
+ # Amazon sometimes takes a couple of seconds to update the security group, so wait until it exists.
+ while True:
+ group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ if group.get('VpcId') and not group.get('IpPermissionsEgress'):
+ pass
+ else:
+ break
+
+ changed = True
+
+ if tags is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+ if tags_to_delete:
+ try:
+ client.delete_tags(Resources=[group['GroupId']], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ else:
+ module.fail_json(msg="Unsupported state requested: %s" % state)
+
+ # create a lookup for all existing rules on the group
+ ip_permission = []
+ if group:
+ # Manage ingress rules
+ groupRules = {}
+ add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
+ # Now, go through all provided rules and ensure they are there.
+ if rules is not None:
+ for rule in rules:
+ validate_rule(module, rule)
+ group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
+ group, groups, vpc_id)
+ if target_group_created:
+ changed = True
+
+ if rule['proto'] in ('all', '-1', -1):
+ rule['proto'] = -1
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ if group_id:
+ rule_id = make_rule_key('in', rule, group['GroupId'], group_id)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_group_grant(group_id, rule)
+ if ip_permission:
+ ips = ip_permission
+ if vpc_id:
+ [useridpair.update({'VpcId': vpc_id}) for useridpair in
+ ip_permission.get('UserIdGroupPairs', [])]
+ try:
+ client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to authorize ingress for group %s security group '%s' - %s" %
+ (group_id, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ elif ip:
+ # Convert ip to list we can iterate over
+ if ip and not isinstance(ip, list):
+ ip = [ip]
+
+ changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip, ip_permission,
+ module, rule, "ipv4")
+ elif ipv6:
+ # Convert ip to list we can iterate over
+ if not isinstance(ipv6, list):
+ ipv6 = [ipv6]
+ # If rule already exists, don't later delete it
+ changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6, ip_permission,
+ module, rule, "ipv6")
+ # Finally, remove anything left in the groupRules -- these will be defunct rules
+ if purge_rules:
+ for (rule, grant) in groupRules.values():
+ ip_permission = serialize_revoke(grant, rule)
+ if not module.check_mode:
+ try:
+ client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to revoke ingress for security group '%s' - %s" %
+ (group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ # Manage egress rules
+ groupRules = {}
+ add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules)
+ # Now, go through all provided rules and ensure they are there.
+ if rules_egress is not None:
+ for rule in rules_egress:
+ validate_rule(module, rule)
+ group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
+ group, groups, vpc_id)
+ if target_group_created:
+ changed = True
+
+ if rule['proto'] in ('all', '-1', -1):
+ rule['proto'] = -1
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ if group_id:
+ rule_id = make_rule_key('out', rule, group['GroupId'], group_id)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_group_grant(group_id, rule)
+ if ip_permission:
+ ips = ip_permission
+ if vpc_id:
+ [useridpair.update({'VpcId': vpc_id}) for useridpair in
+ ip_permission.get('UserIdGroupPairs', [])]
+ try:
+ client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to authorize egress for group %s security group '%s' - %s" %
+ (group_id, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ elif ip:
+ # Convert ip to list we can iterate over
+ if not isinstance(ip, list):
+ ip = [ip]
+ changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip,
+ ip_permission, module, rule, "ipv4")
+ elif ipv6:
+ # Convert ip to list we can iterate over
+ if not isinstance(ipv6, list):
+ ipv6 = [ipv6]
+ # If rule already exists, don't later delete it
+ changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6,
+ ip_permission, module, rule, "ipv6")
+ elif vpc_id is not None:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0'
+ if default_egress_rule not in groupRules:
+ if not module.check_mode:
+ ip_permission = [{'IpProtocol': '-1',
+ 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
+ }
+ ]
+ try:
+ client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to authorize egress for ip %s security group '%s' - %s" %
+ ('0.0.0.0/0',
+ group['GroupName'],
+ e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ else:
+ # make sure the default egress rule is not removed
+ del groupRules[default_egress_rule]
+
+ # Finally, remove anything left in the groupRules -- these will be defunct rules
+ if purge_rules_egress and vpc_id is not None:
+ for (rule, grant) in groupRules.values():
+ # we shouldn't be revoking 0.0.0.0 egress
+ if grant != '0.0.0.0/0':
+ ip_permission = serialize_revoke(grant, rule)
+ if not module.check_mode:
+ try:
+ client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to revoke egress for ip %s security group '%s' - %s" %
+ (grant, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ if group:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group)
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []),
+ tag_name_key_name='key', tag_value_key_name='value')
+ module.exit_json(changed=changed, **security_group)
+ else:
+ module.exit_json(changed=changed, group_id=None)
+
+
+if __name__ == '__main__':
+ main()
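The module's 2.4-style shorthand (a ports list, and list-valued sources such as cidr_ip or group_name) is expanded internally into one from_port/to_port rule per source before rules are compared against what AWS reports; the real logic lives in rule_expand_ports, rules_expand_sources and deduplicate_rules_args above. A minimal Python sketch of the port expansion, where the expand_ports helper and the sample rule are illustrative only:

    # Minimal sketch of the 'ports' expansion oo_ec2_group performs; the
    # expand_ports helper and the sample rule below are illustrative only.
    def expand_ports(rule):
        ports = rule.get('ports')
        if ports is None:
            return [rule]
        ports = ports if isinstance(ports, list) else [ports]
        expanded = []
        for port in ports:
            new = {k: v for k, v in rule.items() if k != 'ports'}
            if isinstance(port, str) and '-' in port:
                low, high = port.split('-', 1)
                new['from_port'], new['to_port'] = low.strip(), high.strip()
            else:
                new['from_port'] = new['to_port'] = port
            expanded.append(new)
        return expanded

    rule = {'proto': 'tcp', 'ports': [22, '8080-8099'], 'cidr_ip': '10.0.0.0/8'}
    for expanded_rule in expand_ports(rule):
        # each port entry becomes its own from_port/to_port rule; list-valued
        # sources (cidr_ip, group_name, ...) are expanded the same way next
        print(expanded_rule)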
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 410b739e9..7b55dda56 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,21 +1,19 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: >
(openshift_master_ha | bool) and
- (not master_api_service_status_changed | default(false)) and
- openshift.master.cluster_method == 'native'
+ (not master_api_service_status_changed | default(false))
# TODO: need to fix up ignore_errors here
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
when: >
(openshift_master_ha | bool) and
- (not master_controllers_service_status_changed | default(false)) and
- openshift.master.cluster_method == 'native'
+ (not master_controllers_service_status_changed | default(false))
ignore_errors: yes
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index e68ae74bd..ede6f2125 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,7 +1,7 @@
---
- name: restart node
become: yes
- systemd: name={{ openshift.common.service_type }}-node daemon-reload=yes state=restarted
+ systemd: name={{ openshift_service_type }}-node daemon-reload=yes state=restarted
- name: save iptable rules
become: yes
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index fdf01b7c2..88d62de49 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -23,5 +23,5 @@ cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
-openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
+openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift_service_type }}-node
nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md
deleted file mode 100644
index 8c10c9991..000000000
--- a/roles/openshift_atomic/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-OpenShift Atomic
-================
-
-This role houses atomic specific tasks.
-
-Requirements
-------------
-
-Role Variables
---------------
-
-Dependencies
-------------
-
-Example Playbook
-----------------
-
-```
-- name: Ensure atomic proxies are defined
- hosts: localhost
- roles:
- - role: openshift_atomic
-```
-
-License
--------
-
-Apache License Version 2.0
diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml
deleted file mode 100644
index ea129f514..000000000
--- a/roles/openshift_atomic/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Atomic related tasks
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- role: lib_openshift
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 42ef22846..74e5d1dde 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -6,9 +6,7 @@ openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
-openshift_aws_current_version: ''
-openshift_aws_new_version: ''
-
+openshift_aws_node_group_upgrade: False
openshift_aws_wait_for_ssh: True
openshift_aws_clusterid: default
@@ -19,7 +17,6 @@ openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''
-openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift"
openshift_aws_iam_role_name: openshift_node_describe_instances
openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
@@ -34,14 +31,12 @@ openshift_aws_ami_name: openshift-gi
openshift_aws_base_ami_name: ami_base
openshift_aws_launch_config_bootstrap_token: ''
-openshift_aws_launch_config_basename: "{{ openshift_aws_clusterid }}"
openshift_aws_users: []
openshift_aws_ami_tags:
bootstrap: "true"
openshift-created: "true"
- clusterid: "{{ openshift_aws_clusterid }}"
parent: "{{ openshift_aws_base_ami | default('unknown') }}"
openshift_aws_s3_mode: create
@@ -124,6 +119,20 @@ openshift_aws_ami_map:
infra: "{{ openshift_aws_ami }}"
compute: "{{ openshift_aws_ami }}"
+openshift_aws_master_group:
+- name: "{{ openshift_aws_clusterid }} master group"
+ group: master
+
+openshift_aws_node_groups:
+- name: "{{ openshift_aws_clusterid }} compute group"
+ group: compute
+- name: "{{ openshift_aws_clusterid }} infra group"
+ group: infra
+
+openshift_aws_created_asgs: []
+openshift_aws_current_asgs: []
+
+# these will be used during upgrade
openshift_aws_master_group_config:
# The 'master' key is always required here.
master:
@@ -139,7 +148,6 @@ openshift_aws_master_group_config:
host-type: master
sub-host-type: default
runtime: docker
- version: "{{ openshift_aws_new_version }}"
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
@@ -163,7 +171,6 @@ openshift_aws_node_group_config:
host-type: node
sub-host-type: compute
runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -183,7 +190,6 @@ openshift_aws_node_group_config:
host-type: node
sub-host-type: infra
runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -283,21 +289,4 @@ openshift_aws_node_run_bootstrap_startup: True
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
-openshift_aws_node_groups: nodes
-
openshift_aws_masters_groups: masters,etcd,nodes
-
-# If creating extra node groups, you'll need to define all of the following
-
-# The format is the same as openshift_aws_node_group_config, but the top-level
-# key names should be different (ie, not == master or infra).
-# openshift_aws_node_group_config_extra: {}
-
-# This variable should look like openshift_aws_launch_config_security_groups
-# and contain a one-to-one mapping of top level keys that are defined in
-# openshift_aws_node_group_config_extra.
-# openshift_aws_launch_config_security_groups_extra: {}
-
-# openshift_aws_node_security_groups_extra: {}
-
-# openshift_aws_ami_map_extra: {}
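The defaults now describe groups as small {name, group} entries (openshift_aws_master_group, openshift_aws_node_groups), while the heavier per-group settings remain keyed by group name in openshift_aws_master_group_config and openshift_aws_node_group_config; the reworked tasks look the latter up through the former as l_node_group_config[openshift_aws_node_group.group]. A minimal sketch of that lookup pattern, with made-up config values:

    # Sketch of the node-group -> group-config lookup the tasks now rely on;
    # the config values below are made up.
    openshift_aws_node_groups = [
        {'name': 'default compute group', 'group': 'compute'},
        {'name': 'default infra group', 'group': 'infra'},
    ]
    openshift_aws_node_group_config = {
        'compute': {'instance_type': 'm4.xlarge', 'min_size': 3},
        'infra': {'instance_type': 'm4.xlarge', 'min_size': 2},
    }

    # Equivalent of "{{ l_node_group_config[openshift_aws_node_group.group] }}"
    for node_group in openshift_aws_node_groups:
        config = openshift_aws_node_group_config[node_group['group']]
        print(node_group['name'], config['instance_type'], config['min_size'])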
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index e707abd3f..dfcb11da3 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
@@ -4,11 +4,43 @@
Custom filters for use in openshift_aws
'''
+from ansible import errors
+
class FilterModule(object):
''' Custom ansible filters for use by openshift_aws role'''
@staticmethod
+ def scale_groups_serial(scale_group_info, upgrade=False):
+ ''' This function will determine what the deployment serial should be and return it
+
+ Search through the tags and find the deployment_serial tag. Once found,
+ determine if an increment is needed during an upgrade.
+ If upgrade is true, increment the serial and return it;
+ otherwise, return the serial unchanged.
+ '''
+ if scale_group_info == []:
+ return 1
+
+ scale_group_info = scale_group_info[0]
+
+ if not isinstance(scale_group_info, dict):
+ raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")
+
+ serial = None
+
+ for tag in scale_group_info['tags']:
+ if tag['key'] == 'deployment_serial':
+ serial = int(tag['value'])
+ if upgrade:
+ serial += 1
+ break
+ else:
+ raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")
+
+ return serial
+
+ @staticmethod
def scale_groups_match_capacity(scale_group_info):
''' This function will verify that the scale group instance count matches
the scale group desired capacity
@@ -38,4 +70,5 @@ class FilterModule(object):
def filters(self):
''' returns a mapping of filters to methods '''
return {'build_instance_tags': self.build_instance_tags,
- 'scale_groups_match_capacity': self.scale_groups_match_capacity}
+ 'scale_groups_match_capacity': self.scale_groups_match_capacity,
+ 'scale_groups_serial': self.scale_groups_serial}
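scale_groups_serial drives the serial-suffixed scale group names used later in build_node_group.yml and scale_group.yml: it returns 1 when no matching scale group exists, otherwise it reads the deployment_serial tag of the first result and bumps it only when upgrade is true. A standalone sketch of that behaviour, using made-up ec2_asg_facts output:

    # Minimal sketch of the scale_groups_serial behaviour; the asgs sample
    # below is made up and only loosely mirrors ec2_asg_facts output.
    def scale_groups_serial(scale_group_info, upgrade=False):
        if scale_group_info == []:
            return 1                      # first deployment of this group
        for tag in scale_group_info[0]['tags']:
            if tag['key'] == 'deployment_serial':
                serial = int(tag['value'])
                return serial + 1 if upgrade else serial
        raise ValueError('deployment_serial tag was not found')

    asgs = [{'auto_scaling_group_name': 'default compute group 3',
             'tags': [{'key': 'deployment_serial', 'value': '3'}]}]

    print(scale_groups_serial([]))          # -> 1, no scale group exists yet
    print(scale_groups_serial(asgs))        # -> 3, reuse the current serial
    print(scale_groups_serial(asgs, True))  # -> 4, upgrades bump the serial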
diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml
index ae320962f..c2a2cea30 100644
--- a/roles/openshift_aws/tasks/accept_nodes.yml
+++ b/roles/openshift_aws/tasks/accept_nodes.yml
@@ -1,6 +1,6 @@
---
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -11,7 +11,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -22,9 +22,14 @@
delay: 3
until: "'instances' in instancesout and instancesout.instances|length > 0"
-- debug:
+- name: Dump the private dns names
+ debug:
msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
+- name: Dump the master public ip address
+ debug:
+ msg: "{{ mastersout.instances[0].public_ip_address }}"
+
- name: approve nodes
oc_adm_csr:
#approve_all: True
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 7e8e9b679..7fb617dd5 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -1,6 +1,4 @@
---
-# This task file expects l_nodes_to_build to be passed in.
-
# When openshift_aws_use_custom_ami is '' then
# we retrieve the latest build AMI.
# Then set openshift_aws_ami to the ami.
@@ -26,12 +24,41 @@
# Need to set epoch time in one place to use for launch_config and scale_group
- set_fact:
l_epoch_time: "{{ ansible_date_time.epoch }}"
+#
+# Query ASGs and determine whether we need to create the others.
+# If we find more than one for each type, then exit.
+- name: query all ASGs for this cluster
+ ec2_asg_facts:
+ region: "{{ openshift_aws_region }}"
+ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(l_node_group_config[openshift_aws_node_group.group].tags) }}"
+ register: asgs
+
+- fail:
+ msg: "Found more than 1 auto scaling group that matches the query for group: {{ openshift_aws_node_group }}"
+ when:
+ - asgs.results|length > 1
+
+- debug:
+ msg: "{{ asgs }}"
+
+- name: set the value for the deployment_serial and the current asgs
+ set_fact:
+ l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
+ openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
+
+- name: dump deployment serial
+ debug:
+ msg: "Deployment serial: {{ l_deployment_serial }}"
+
+- name: dump current_asgs
+ debug:
+ msg: "openshift_aws_current_asgs: {{ openshift_aws_current_asgs }}"
- when: openshift_aws_create_iam_role
- include: iam_role.yml
+ include_tasks: iam_role.yml
- when: openshift_aws_create_launch_config
- include: launch_config.yml
+ include_tasks: launch_config.yml
- when: openshift_aws_create_scale_group
- include: scale_group.yml
+ include_tasks: scale_group.yml
diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml
index d9910d938..cf3bb28fb 100644
--- a/roles/openshift_aws/tasks/iam_role.yml
+++ b/roles/openshift_aws/tasks/iam_role.yml
@@ -13,11 +13,10 @@
#####
- name: Create an iam role
iam_role:
- name: "{{ item.value.iam_role }}"
+ name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: l_node_group_config[openshift_aws_node_group.group].iam_role is defined
#####
# The second part of this task file is linking the role to a policy
@@ -28,9 +27,8 @@
- name: create an iam policy
iam_policy:
iam_type: role
- iam_name: "{{ item.value.iam_role }}"
- policy_json: "{{ item.value.policy_json }}"
- policy_name: "{{ item.value.policy_name }}"
+ iam_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
+ policy_json: "{{ l_node_group_config[openshift_aws_node_group.group].policy_json }}"
+ policy_name: "{{ l_node_group_config[openshift_aws_node_group.group].policy_name }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: "'iam_role' in l_node_group_config[openshift_aws_node_group.group]"
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index 0dbeba5a0..6f78ee00a 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -1,15 +1,26 @@
---
-- fail:
- msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
- when:
- - openshift_aws_ami is undefined
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name: "{{ openshift_aws_launch_config_security_groups[openshift_aws_node_group.group] }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ region: "{{ openshift_aws_region }}"
+ register: ec2sgs
-- fail:
- msg: "Ensure that openshift_deployment_type is defined."
- when:
- - openshift_deployment_type is undefined
-
-- include: launch_config_create.yml
- with_dict: "{{ l_nodes_to_build }}"
- loop_control:
- loop_var: launch_config_item
+# Create the scale group config
+- name: Create the node scale group launch config
+ ec2_lc:
+ name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ region: "{{ openshift_aws_region }}"
+ image_id: "{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}"
+ instance_type: "{{ l_node_group_config[openshift_aws_node_group.group].instance_type }}"
+ security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
+ instance_profile_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role if l_node_group_config[openshift_aws_node_group.group].iam_role is defined and
+ l_node_group_config[openshift_aws_node_group.group].iam_role != '' and
+ openshift_aws_create_iam_role
+ else omit }}"
+ user_data: "{{ lookup('template', 'user_data.j2') }}"
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ l_node_group_config[openshift_aws_node_group.group].volumes }}"
+ assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml
deleted file mode 100644
index f7f0f0953..000000000
--- a/roles/openshift_aws/tasks/launch_config_create.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: fetch the security groups for launch config
- ec2_group_facts:
- filters:
- group-name: "{{ l_launch_config_security_groups[launch_config_item.key] }}"
- vpc-id: "{{ vpcout.vpcs[0].id }}"
- region: "{{ openshift_aws_region }}"
- register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group launch config
- ec2_lc:
- name: "{{ openshift_aws_launch_config_basename }}-{{ launch_config_item.key }}{{'-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- region: "{{ openshift_aws_region }}"
- image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}"
- instance_type: "{{ launch_config_item.value.instance_type }}"
- security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
- instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and
- launch_config_item.value.iam_role != '' and
- openshift_aws_create_iam_role
- else omit }}"
- user_data: "{{ lookup('template', 'user_data.j2') }}"
- key_name: "{{ openshift_aws_ssh_key_name }}"
- ebs_optimized: False
- volumes: "{{ launch_config_item.value.volumes }}"
- assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 91538ed5c..786a2e4cf 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,16 +1,16 @@
---
- when: openshift_aws_create_iam_cert | bool
name: create the iam_cert for elb certificate
- include: iam_cert.yml
+ include_tasks: iam_cert.yml
- when: openshift_aws_create_s3 | bool
name: create s3 bucket for registry
- include: s3.yml
+ include_tasks: s3.yml
-- include: vpc_and_subnet_id.yml
+- include_tasks: vpc_and_subnet_id.yml
- name: create elbs
- include: elb.yml
+ include_tasks: elb.yml
with_dict: "{{ openshift_aws_elb_dict }}"
vars:
l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
@@ -19,14 +19,15 @@
loop_var: l_elb_dict_item
- name: include scale group creation for master
- include: build_node_group.yml
+ include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_master_group }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_master_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
+ l_node_group_config: "{{ openshift_aws_master_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 3349acb7a..696b323c0 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -3,7 +3,7 @@
set_fact:
openshift_node_bootstrap: True
-- include: vpc_and_subnet_id.yml
+- include_tasks: vpc_and_subnet_id.yml
- name: create instance for ami creation
ec2:
@@ -27,7 +27,7 @@
Name: "{{ openshift_aws_base_ami_name }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index 3e84666a2..d82f18574 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -3,7 +3,7 @@
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
- name: fetch master instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
@@ -27,24 +27,19 @@
set_fact:
openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
-- include: vpc_and_subnet_id.yml
+- include_tasks: vpc_and_subnet_id.yml
- name: include build compute and infra node groups
- include: build_node_group.yml
+ include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_node_groups }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
-
-- name: include build node group for extra nodes
- include: build_node_group.yml
- when: openshift_aws_node_group_config_extra is defined
- vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups_extra }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map_extra }}"
+ l_node_group_config: "{{ openshift_aws_node_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
# instances aren't scaling fast enough here, we need to wait for them
- when: openshift_aws_wait_for_ssh | bool
name: wait for our new nodes to come up
- include: wait_for_groups.yml
+ include_tasks: wait_for_groups.yml
+ vars:
+ created_asgs: "{{ openshift_aws_created_asgs }}"
diff --git a/roles/openshift_aws/tasks/remove_scale_group.yml b/roles/openshift_aws/tasks/remove_scale_group.yml
index 55d1af2b5..a01cde294 100644
--- a/roles/openshift_aws/tasks/remove_scale_group.yml
+++ b/roles/openshift_aws/tasks/remove_scale_group.yml
@@ -1,10 +1,13 @@
---
+# TODO: figure out how to remove scale groups;
+# possibly drive this from openshift_aws_current_asgs
- name: fetch the scale groups
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
+ name: "^{{ item }}$"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'version': openshift_aws_current_version} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ with_items: "{{ openshift_aws_current_asgs if openshift_aws_current_asgs != [] else openshift_aws_asgs_to_remove }}"
register: qasg
- name: remove non-master scale groups
@@ -14,7 +17,7 @@
name: "{{ item.auto_scaling_group_name }}"
when: "'master' not in item.auto_scaling_group_name"
register: asg_results
- with_items: "{{ qasg.results }}"
+ with_items: "{{ qasg | json_query('results[*]') | sum(attribute='results', start=[]) }}"
async: 600
poll: 0
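Because qasg is now registered from a looped ec2_asg_facts call, each outer item carries its own results list, and the json_query('results[*]') | sum(attribute='results', start=[]) chain flattens those nested lists into a single list of scale groups to delete. A rough Python equivalent, with a made-up register structure:

    # Rough Python equivalent of the Jinja flattening; qasg below is a
    # made-up register from a looped ec2_asg_facts call.
    qasg = {'results': [
        {'item': 'default compute group 3',
         'results': [{'auto_scaling_group_name': 'default compute group 3'}]},
        {'item': 'default infra group 3',
         'results': [{'auto_scaling_group_name': 'default infra group 3'}]},
    ]}

    # json_query('results[*]') selects the outer list; sum(attribute='results',
    # start=[]) concatenates every inner 'results' list into one flat list.
    flattened = sum((outer['results'] for outer in qasg['results']), [])
    print([asg['auto_scaling_group_name'] for asg in flattened])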
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
index 9cf37c840..ba70bcff6 100644
--- a/roles/openshift_aws/tasks/s3.yml
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -1,6 +1,6 @@
---
- name: Create an s3 bucket
- s3:
+ aws_s3:
bucket: "{{ openshift_aws_s3_bucket_name }}"
mode: "{{ openshift_aws_s3_mode }}"
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 30df7545d..3632f7ce9 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -1,20 +1,30 @@
---
+- name: set node group name
+ set_fact:
+ l_node_group_name: "{{ openshift_aws_node_group.name }} {{ l_deployment_serial }}"
+
- name: Create the scale group
ec2_asg:
- name: "{{ openshift_aws_scale_group_basename }} {{ item.key }}"
- launch_config_name: "{{ openshift_aws_launch_config_basename }}-{{ item.key }}{{ '-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- health_check_period: "{{ item.value.health_check.period }}"
- health_check_type: "{{ item.value.health_check.type }}"
- min_size: "{{ item.value.min_size }}"
- max_size: "{{ item.value.max_size }}"
- desired_capacity: "{{ item.value.desired_size }}"
+ name: "{{ l_node_group_name }}"
+ launch_config_name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ health_check_period: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.period }}"
+ health_check_type: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.type }}"
+ min_size: "{{ l_node_group_config[openshift_aws_node_group.group].min_size }}"
+ max_size: "{{ l_node_group_config[openshift_aws_node_group.group].max_size }}"
+ desired_capacity: "{{ l_node_group_config[openshift_aws_node_group.group].desired_size }}"
region: "{{ openshift_aws_region }}"
- termination_policies: "{{ item.value.termination_policy if 'termination_policy' in item.value else omit }}"
- load_balancers: "{{ item.value.elbs if 'elbs' in item.value else omit }}"
- wait_for_instances: "{{ item.value.wait_for_instances | default(False)}}"
+ termination_policies: "{{ l_node_group_config[openshift_aws_node_group.group].termination_policy if 'termination_policy' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ load_balancers: "{{ l_node_group_config[openshift_aws_node_group.group].elbs if 'elbs' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ wait_for_instances: "{{ l_node_group_config[openshift_aws_node_group.group].wait_for_instances | default(False)}}"
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}"
- replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (item.value.replace_all_instances | default(omit)) }}"
+ replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != []
+ else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}"
tags:
- - "{{ openshift_aws_node_group_config_tags | combine(item.value.tags) }}"
- with_dict: "{{ l_nodes_to_build }}"
+ - "{{ openshift_aws_node_group_config_tags
+ | combine(l_node_group_config[openshift_aws_node_group.group].tags)
+ | combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}"
+
+- name: append the asg name to the openshift_aws_created_asgs fact
+ set_fact:
+ openshift_aws_created_asgs: "{{ [l_node_group_name] | union(openshift_aws_created_asgs) | list }}"
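With the version suffix removed, names are derived from the node group entry itself: the launch config becomes '<group name>-<ami>-<epoch>' and the scale group becomes '<group name> <deployment serial>', which is also the value appended to openshift_aws_created_asgs. A small sketch of the resulting names, where every value is made up:

    # Sketch of the new naming scheme; every value below is made up.
    group = {'name': 'default compute group', 'group': 'compute'}
    ami = 'ami-0a1b2c3d'    # openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)
    epoch = '1511900000'    # l_epoch_time, captured once per run
    serial = 4              # l_deployment_serial from scale_groups_serial

    launch_config_name = '%s-%s-%s' % (group['name'], ami, epoch)
    scale_group_name = '%s %s' % (group['name'], serial)

    print(launch_config_name)  # default compute group-ami-0a1b2c3d-1511900000
    print(scale_group_name)    # default compute group 4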
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 0cb749dcc..74877d5c7 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,6 +1,6 @@
---
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
@@ -12,7 +12,7 @@
- name: bundle ami
ec2_ami:
- instance_id: "{{ instancesout.instances.0.id }}"
+ instance_id: "{{ instancesout.instances.0.instance_id }}"
region: "{{ openshift_aws_region }}"
state: present
description: "This was provisioned {{ ansible_date_time.iso8601 }}"
@@ -31,7 +31,7 @@
source-ami: "{{ amioutput.image_id }}"
- name: copy the ami for encrypted disks
- include: ami_copy.yml
+ include_tasks: ami_copy.yml
vars:
openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
@@ -46,4 +46,4 @@
ec2:
state: absent
region: "{{ openshift_aws_region }}"
- instance_ids: "{{ instancesout.instances.0.id }}"
+ instance_ids: "{{ instancesout.instances.0.instance_id }}"
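seal_ami.yml now reads instance ids from ec2_instance_facts, which exposes the value under instance_id rather than id. A hedged boto3 sketch of where that field originates, with an illustrative region and tag value, and assuming credentials are already configured:

    import boto3

    ec2 = boto3.client("ec2", region_name="us-east-1")
    resp = ec2.describe_instances(
        Filters=[{"Name": "tag:Name", "Values": ["base-ami-host"]}]
    )
    for reservation in resp["Reservations"]:
        for instance in reservation["Instances"]:
            print(instance["InstanceId"])  # surfaced by ec2_instance_facts as instance_id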
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 5cc7ae537..0568a0190 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -6,11 +6,27 @@
"tag:Name": "{{ openshift_aws_clusterid }}"
register: vpcout
-- include: security_group_create.yml
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups }}"
+- name: create the node group sgs
+ oo_ec2_group:
+ name: "{{ item.value.name}}"
+ description: "{{ item.value.desc }}"
+ rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: create the k8s sgs for the node group
+ oo_ec2_group:
+ name: "{{ item.value.name }}_k8s"
+ description: "{{ item.value.desc }} for k8s"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+ register: k8s_sg_create
-- include: security_group_create.yml
- when: openshift_aws_node_security_groups_extra is defined
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}"
+- name: tag security groups with proper tags
+ ec2_tag:
+ tags: "{{ openshift_aws_security_groups_tags }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml
deleted file mode 100644
index ef6060555..000000000
--- a/roles/openshift_aws/tasks/security_group_create.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: create the node group sgs
- ec2_group:
- name: "{{ item.value.name}}"
- description: "{{ item.value.desc }}"
- rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
-
-- name: create the k8s sgs for the node group
- ec2_group:
- name: "{{ item.value.name }}_k8s"
- description: "{{ item.value.desc }} for k8s"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
- register: k8s_sg_create
-
-- name: tag sg groups with proper tags
- ec2_tag:
- tags: "{{ openshift_aws_security_groups_tags }}"
- resource: "{{ item.group_id }}"
- region: "{{ openshift_aws_region }}"
- with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml
index 05b68f460..700917ef4 100644
--- a/roles/openshift_aws/tasks/setup_master_group.yml
+++ b/roles/openshift_aws/tasks/setup_master_group.yml
@@ -8,7 +8,7 @@
msg: "openshift_aws_region={{ openshift_aws_region }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
@@ -19,11 +19,13 @@
delay: 3
until: instancesout.instances|length > 0
+- debug: var=instancesout
+
- name: add new master to masters group
add_host:
groups: "{{ openshift_aws_masters_groups }}"
name: "{{ item.public_dns_name }}"
- hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}"
+ hostname: "{{ openshift_aws_clusterid }}-master-{{ item.instance_id[:-5] }}"
with_items: "{{ instancesout.instances }}"
- name: wait for ssh to become available
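The master hostname is now derived from instance_id with the last five characters trimmed. A quick illustration of the slice on a made-up instance id:

    instance_id = "i-0a1b2c3d4e5f6a7b8"  # hypothetical
    print("mycluster-master-" + instance_id[:-5])  # mycluster-master-i-0a1b2c3d4e5f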
diff --git a/roles/openshift_aws/tasks/setup_scale_group_facts.yml b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
index d65fdc2de..14c5246c9 100644
--- a/roles/openshift_aws/tasks/setup_scale_group_facts.yml
+++ b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
@@ -1,11 +1,15 @@
---
-- name: group scale group nodes
- ec2_remote_facts:
+- name: fetch all created instances
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
- "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid }}}"
+ "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
+ 'instance-state-name': 'running'} }}"
register: qinstances
+# Building the new and current groups depends on having a list of the current asgs and the created ones,
+# found in the variables openshift_aws_created_asgs and openshift_aws_current_asgs. If these do not exist,
+# we cannot determine which hosts are new and which hosts are current.
- name: Build new node group
add_host:
groups: oo_sg_new_nodes
@@ -13,10 +17,16 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default(False)) == openshift_aws_new_version
+ - openshift_aws_created_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_created_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
+- name: dump openshift_aws_current_asgs
+ debug:
+ msg: "{{ openshift_aws_current_asgs }}"
+
- name: Build current node group
add_host:
groups: oo_sg_current_nodes
@@ -24,7 +34,9 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default('')) == openshift_aws_current_version
+ - openshift_aws_current_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_current_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
diff --git a/roles/openshift_aws/tasks/upgrade_node_group.yml b/roles/openshift_aws/tasks/upgrade_node_group.yml
index d7851d887..4f4730dd6 100644
--- a/roles/openshift_aws/tasks/upgrade_node_group.yml
+++ b/roles/openshift_aws/tasks/upgrade_node_group.yml
@@ -1,16 +1,26 @@
---
-- fail:
- msg: 'Please ensure the current_version and new_version variables are not the same.'
+- include_tasks: provision_nodes.yml
+ vars:
+ openshift_aws_node_group_upgrade: True
when:
- - openshift_aws_current_version == openshift_aws_new_version
+ - openshift_aws_upgrade_provision_nodes | default(True)
+
+- debug: var=openshift_aws_current_asgs
+- debug: var=openshift_aws_created_asgs
-- include: provision_nodes.yml
+- name: fail if asg variables aren't set
+ fail:
+ msg: "Please ensure that openshift_aws_created_asgs and openshift_aws_current_asgs are defined."
+ when:
+ - openshift_aws_created_asgs == []
+ - openshift_aws_current_asgs == []
-- include: accept_nodes.yml
+- include_tasks: accept_nodes.yml
+ when: openshift_aws_upgrade_accept_nodes | default(True)
-- include: setup_scale_group_facts.yml
+- include_tasks: setup_scale_group_facts.yml
-- include: setup_master_group.yml
+- include_tasks: setup_master_group.yml
vars:
# we do not set etcd here as it's limited to 1 or 3
openshift_aws_masters_groups: masters,nodes
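Note that the new guard fails only when both ASG lists are empty; if either openshift_aws_created_asgs or openshift_aws_current_asgs has entries, the play continues. A tiny sketch of the condition with hypothetical values:

    created_asgs, current_asgs = [], []  # hypothetical fact values
    if created_asgs == [] and current_asgs == []:
        raise SystemExit("Please ensure that openshift_aws_created_asgs and "
                         "openshift_aws_current_asgs are defined.")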
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 9f1a68a2a..1f4ef3e1c 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -1,31 +1,41 @@
---
# The idea here is to wait until all scale groups are at
# their desired capacity before continuing.
-- name: fetch the scale groups
+# This is accomplished with a custom filter_plugin and an until clause
+- name: "fetch the scale groups"
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
register: qasg
- until: qasg.results | scale_groups_match_capacity | bool
+ until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
delay: 10
retries: 60
+- debug: var=openshift_aws_created_asgs
+
+# How do we guarantee the instances are up?
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'tag:version': openshift_aws_new_version} }}"
+ 'tag:aws:autoscaling:groupName': item,
+ 'instance-state-name': 'running'} }}"
+ with_items: "{{ openshift_aws_created_asgs if openshift_aws_created_asgs != [] else qasg | sum(attribute='results', start=[]) }}"
register: instancesout
until: instancesout.instances|length > 0
delay: 5
retries: 60
+- name: dump instances
+ debug:
+ msg: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
+
- name: wait for ssh to become available
wait_for:
port: 22
host: "{{ item.public_ip_address }}"
timeout: 300
search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
+ with_items: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index fe0fe83d4..bda1334cd 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -7,8 +7,8 @@ write_files:
owner: 'root:root'
permissions: '0640'
content: |
- openshift_group_type: {{ launch_config_item.key }}
-{% if launch_config_item.key != 'master' %}
+ openshift_group_type: {{ openshift_aws_node_group.group }}
+{% if openshift_aws_node_group.group != 'master' %}
- path: /etc/origin/node/bootstrap.kubeconfig
owner: 'root:root'
permissions: '0640'
@@ -19,7 +19,7 @@ runcmd:
{% if openshift_aws_node_run_bootstrap_startup %}
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
{% endif %}
-{% if launch_config_item.key != 'master' %}
+{% if openshift_aws_node_group.group != 'master' %}
- [ systemctl, restart, NetworkManager]
- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
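user_data.j2 now branches on openshift_aws_node_group.group instead of the old with_dict item key. A minimal Jinja2 rendering sketch with an illustrative group value (a trimmed-down stand-in, not the real template):

    from jinja2 import Template

    template = Template(
        "openshift_group_type: {{ openshift_aws_node_group.group }}\n"
        "{% if openshift_aws_node_group.group != 'master' %}bootstrap the node\n{% endif %}"
    )
    print(template.render(openshift_aws_node_group={"group": "compute"}))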
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
index e0b51eee0..612b6522d 100644
--- a/roles/openshift_builddefaults/tasks/main.yml
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -4,11 +4,6 @@
role: builddefaults
# TODO: add ability to define builddefaults env vars sort of like this
# may need to move the config generation to a filter however.
- # openshift_env: "{{ hostvars
- # | oo_merge_hostvars(vars, inventory_hostname)
- # | oo_openshift_env }}"
- # openshift_env_structures:
- # - 'openshift.builddefaults.env.*'
local_facts:
http_proxy: "{{ openshift_builddefaults_http_proxy | default(None) }}"
https_proxy: "{{ openshift_builddefaults_https_proxy | default(None) }}"
diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index f8b784a63..81b49ce60 100644
--- a/roles/openshift_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: openshift_cli
+- role: openshift_facts
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 05e0a1352..eb00f13db 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -9,7 +9,7 @@
- name: Install the base package for admin tooling
package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when: not openshift.common.is_containerized | bool
register: install_result
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 08045794a..440b8ec28 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -27,7 +27,7 @@ class BinarySyncError(Exception):
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class BinarySyncer(object):
"""
- Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
+ Syncs the openshift, oc, and kubectl binaries/symlinks out of
a container onto the host system.
"""
@@ -108,7 +108,10 @@ class BinarySyncer(object):
# Ensure correct symlinks created:
self._sync_symlink('kubectl', 'openshift')
- self._sync_symlink('oadm', 'openshift')
+
+ # Remove old oadm binary
+ if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
+ os.remove(os.path.join(self.bin_dir, 'oadm'))
def _sync_symlink(self, binary_name, link_to):
""" Ensure the given binary name exists and links to the expected binary. """
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 140c6ea26..888aa8f0c 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install clients
- package: name={{ openshift.common.service_type }}-clients state=present
+ package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
when: not openshift.common.is_containerized | bool
register: result
until: result | success
diff --git a/roles/openshift_cluster_autoscaler/tasks/main.yml b/roles/openshift_cluster_autoscaler/tasks/main.yml
index 173dcf044..ca7dfb885 100644
--- a/roles/openshift_cluster_autoscaler/tasks/main.yml
+++ b/roles/openshift_cluster_autoscaler/tasks/main.yml
@@ -31,7 +31,7 @@
type: role
name: "{{ openshift_cluster_autoscaler_name }}"
-- include: aws.yml
+- include_tasks: aws.yml
when: openshift_cluster_autoscaler_cloud_provider == 'aws'
- name: create the policies
diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
index 53e8b448b..3d51abc52 100644
--- a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
+++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
@@ -5,7 +5,7 @@ items:
kind: ServiceAccount
metadata:
name: dockergc
- # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+ # You must grant privileged via: oc adm policy add-scc-to-user -z dockergc privileged
# in order for the dockergc to access the docker socket and root directory
- apiVersion: extensions/v1beta1
kind: DaemonSet
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 1d5fba990..68a0e8857 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.4.6
+XPAAS_VERSION=ose-v1.4.7
ORIGIN_VERSION=${1:-v3.7}
RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
new file mode 120000
index 000000000..8cad94b63
--- /dev/null
+++ b/roles/openshift_examples/files/examples/latest
@@ -0,0 +1 @@
+v3.9 \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v3.7/v3.8 b/roles/openshift_examples/files/examples/v3.7/v3.8
deleted file mode 120000
index 8ddcf661c..000000000
--- a/roles/openshift_examples/files/examples/v3.7/v3.8
+++ /dev/null
@@ -1 +0,0 @@
-v3.8 \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
index 217ef11dd..92be8f42e 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mariadb-persistent",
"annotations": {
- "openshift.io/display-name": "MariaDB (Persistent)",
+ "openshift.io/display-name": "MariaDB",
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
"tags": "database,mariadb",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
index 97e4128a4..4e3e64d48 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mongodb-persistent",
"annotations": {
- "openshift.io/display-name": "MongoDB (Persistent)",
+ "openshift.io/display-name": "MongoDB",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
index 48ac114fd..6ac80f3a0 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mysql-persistent",
"annotations": {
- "openshift.io/display-name": "MySQL (Persistent)",
+ "openshift.io/display-name": "MySQL",
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
index 8a2d23907..190509112 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "postgresql-persistent",
"annotations": {
- "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "openshift.io/display-name": "PostgreSQL",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
index e0e0a88d5..d1103d3af 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "redis-persistent",
"annotations": {
- "openshift.io/display-name": "Redis (Persistent)",
+ "openshift.io/display-name": "Redis",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
"tags": "database,redis",
diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
index e7af160d9..0aa586061 100644
--- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "centos/python-35-centos7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-36-centos7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
index 2b082fc75..7ab9f4e59 100644
--- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
index 86ddc184a..40b4eaa81 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "cakephp-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "CakePHP + MySQL (Persistent)",
+ "openshift.io/display-name": "CakePHP + MySQL",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
"iconClass": "icon-php",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-persistent"
+ "template": "cakephp-mysql-persistent",
+ "app": "cakephp-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
index 3c964bd6a..ecd90e495 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-example"
+ "template": "cakephp-mysql-example",
+ "app": "cakephp-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
index 0a10c5fbc..17a155600 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dancer-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "Dancer + MySQL (Persistent)",
+ "openshift.io/display-name": "Dancer + MySQL",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
"iconClass": "icon-perl",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-persistent"
+ "template": "dancer-mysql-persistent",
+ "app": "dancer-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
index 6122d5436..abf711535 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-example"
+ "template": "dancer-mysql-example",
+ "app": "dancer-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
index f3b5838fa..c8dab0b53 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "django-psql-persistent",
"annotations": {
- "openshift.io/display-name": "Django + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Django + PostgreSQL",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
"iconClass": "icon-python",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-persistent"
+ "template": "django-psql-persistent",
+ "app": "django-psql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
index b21295df2..6395defda 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-example"
+ "template": "django-psql-example",
+ "app": "django-psql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
index 3771280bf..d7cab4889 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
"labels": {
- "template": "httpd-example"
+ "template": "httpd-example",
+ "app": "httpd-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
index 28b4b9d81..6186bba10 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
@@ -15,6 +15,9 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-ephemeral"
+ },
"objects": [
{
"kind": "Route",
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
index 4915bb12c..59dc0d22b 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "jenkins-persistent",
"annotations": {
- "openshift.io/display-name": "Jenkins (Persistent)",
+ "openshift.io/display-name": "Jenkins",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins",
@@ -15,6 +15,9 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-persistent"
+ },
"objects": [
{
"kind": "Route",
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
index 7f2a5d804..f04adaa67 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "nodejs-mongo-persistent",
"annotations": {
- "openshift.io/display-name": "Node.js + MongoDB (Persistent)",
+ "openshift.io/display-name": "Node.js + MongoDB",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
"iconClass": "icon-nodejs",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongo-persistent"
+ "template": "nodejs-mongo-persistent",
+ "app": "nodejs-mongo-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
index b3afae46e..0ce36dba5 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongodb-example"
+ "template": "nodejs-mongodb-example",
+ "app": "nodejs-mongodb-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
index 1c03be28a..10e9382cc 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "rails-pgsql-persistent",
"annotations": {
- "openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Rails + PostgreSQL",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-pgsql-persistent"
+ "template": "rails-pgsql-persistent",
+ "app": "rails-pgsql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
index 240289d33..8ec2c8ea6 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-postgresql-example"
+ "template": "rails-postgresql-example",
+ "app": "rails-postgresql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/latest b/roles/openshift_examples/files/examples/v3.9/latest
deleted file mode 120000
index b9bc2fdcb..000000000
--- a/roles/openshift_examples/files/examples/v3.9/latest
+++ /dev/null
@@ -1 +0,0 @@
-latest \ No newline at end of file
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index 80cb88d45..7b43d5adf 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -28,7 +28,7 @@ Role Variables
| r_openshift_excluder_verify_upgrade | false | true, false | When upgrading, this variable should be set to true when calling the role |
| r_openshift_excluder_package_state | present | present, latest | Use 'latest' to upgrade openshift_excluder package |
| r_openshift_excluder_docker_package_state | present | present, latest | Use 'latest' to upgrade docker_excluder package |
-| r_openshift_excluder_service_type | None | | (Required) Defined as openshift.common.service_type e.g. atomic-openshift |
+| r_openshift_excluder_service_type | None | | (Required) Defined as openshift_service_type e.g. atomic-openshift |
| r_openshift_excluder_upgrade_target | None | | Required when r_openshift_excluder_verify_upgrade is true, defined as openshift_upgrade_target by Upgrade playbooks e.g. '3.6'|
Dependencies
@@ -46,15 +46,12 @@ Example Playbook
# Disable all excluders
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Enable all excluders
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Disable all excluders and verify appropriate excluder packages are available for upgrade
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
index d4f151142..3a910e490 100644
--- a/roles/openshift_excluder/defaults/main.yml
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -2,7 +2,7 @@
# keep the 'current' package or update to 'latest' if available?
r_openshift_excluder_package_state: present
r_openshift_excluder_docker_package_state: present
-
+r_openshift_excluder_service_type: "{{ openshift_service_type }}"
# Legacy variables are included for backwards compatibility with v3.5
# Inventory variables Legacy
# openshift_enable_excluders enable_excluders
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 871081c19..a9653edda 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_facts
- role: lib_utils
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
index 93d6ef149..f0e87ba25 100644
--- a/roles/openshift_excluder/tasks/main.yml
+++ b/roles/openshift_excluder/tasks/main.yml
@@ -19,11 +19,6 @@
msg: "openshift_excluder role can only be called with 'enable' or 'disable'"
when: r_openshift_excluder_action not in ['enable', 'disable']
- - name: Fail if r_openshift_excluder_service_type is not defined
- fail:
- msg: "r_openshift_excluder_service_type must be specified for this role"
- when: r_openshift_excluder_service_type is not defined
-
- name: Fail if r_openshift_excluder_upgrade_target is not defined
fail:
msg: "r_openshift_excluder_upgrade_target must be provided when using this role for upgrades"
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index 7064d727a..53a3bc87e 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -3,4 +3,104 @@ openshift_cli_image_dict:
origin: 'openshift/origin'
openshift-enterprise: 'openshift3/ose'
+openshift_hosted_images_dict:
+ origin: 'openshift/origin-${component}:${version}'
+ openshift-enterprise: 'openshift3/ose-${component}:${version}'
+
openshift_cli_image: "{{ osm_image | default(openshift_cli_image_dict[openshift_deployment_type]) }}"
+
+# osm_default_subdomain is an old migrated fact; it can probably be removed.
+osm_default_subdomain: "router.default.svc.cluster.local"
+openshift_master_default_subdomain: "{{ osm_default_subdomain }}"
+
+openshift_hosted_etcd_storage_nfs_directory: '/exports'
+openshift_hosted_etcd_storage_nfs_options: '*(rw,root_squash)'
+openshift_hosted_etcd_storage_volume_name: 'etcd'
+openshift_hosted_etcd_storage_volume_size: '1Gi'
+openshift_hosted_etcd_storage_create_pv: True
+openshift_hosted_etcd_storage_create_pvc: False
+openshift_hosted_etcd_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_hosted_registry_namespace: 'default'
+openshift_hosted_registry_storage_volume_name: 'registry'
+openshift_hosted_registry_storage_volume_size: '5Gi'
+openshift_hosted_registry_storage_create_pv: True
+openshift_hosted_registry_storage_create_pvc: True
+openshift_hosted_registry_storage_nfs_directory: '/exports'
+openshift_hosted_registry_storage_nfs_options: '*(rw,root_squash)'
+openshift_hosted_registry_storage_glusterfs_endpoints: 'glusterfs-registry-endpoints'
+openshift_hosted_registry_storage_glusterfs_path: glusterfs-registry-volume
+openshift_hosted_registry_storage_glusterfs_readOnly: False
+openshift_hosted_registry_storage_glusterfs_swap: False
+openshift_hosted_registry_storage_glusterfs_swapcopy: True
+openshift_hosted_registry_storage_glusterfs_ips: []
+openshift_hosted_registry_storage_access_modes:
+ - 'ReadWriteMany'
+
+openshift_logging_storage_nfs_directory: '/exports'
+openshift_logging_storage_nfs_options: '*(rw,root_squash)'
+openshift_logging_storage_volume_name: 'logging-es'
+openshift_logging_storage_create_pv: True
+openshift_logging_storage_create_pvc: False
+openshift_logging_storage_access_modes:
+ - ['ReadWriteOnce']
+
+openshift_loggingops_storage_volume_name: 'logging-es-ops'
+openshift_loggingops_storage_volume_size: '10Gi'
+openshift_loggingops_storage_create_pv: True
+openshift_loggingops_storage_create_pvc: False
+openshift_loggingops_storage_nfs_directory: '/exports'
+openshift_loggingops_storage_nfs_options: '*(rw,root_squash)'
+openshift_loggingops_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_metrics_deploy: False
+openshift_metrics_duration: 7
+openshift_metrics_resolution: '10s'
+openshift_metrics_storage_volume_name: 'metrics'
+openshift_metrics_storage_volume_size: '10Gi'
+openshift_metrics_storage_create_pv: True
+openshift_metrics_storage_create_pvc: False
+openshift_metrics_storage_nfs_directory: '/exports'
+openshift_metrics_storage_nfs_options: '*(rw,root_squash)'
+openshift_metrics_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_prometheus_storage_volume_name: 'prometheus'
+openshift_prometheus_storage_volume_size: '10Gi'
+openshift_prometheus_storage_nfs_directory: '/exports'
+openshift_prometheus_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_storage_access_modes:
+ - 'ReadWriteOnce'
+openshift_prometheus_storage_create_pv: True
+openshift_prometheus_storage_create_pvc: False
+
+openshift_prometheus_alertmanager_storage_volume_name: 'prometheus-alertmanager'
+openshift_prometheus_alertmanager_storage_volume_size: '10Gi'
+openshift_prometheus_alertmanager_storage_nfs_directory: '/exports'
+openshift_prometheus_alertmanager_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_alertmanager_storage_access_modes:
+ - 'ReadWriteOnce'
+openshift_prometheus_alertmanager_storage_create_pv: True
+openshift_prometheus_alertmanager_storage_create_pvc: False
+
+openshift_prometheus_alertbuffer_storage_volume_name: 'prometheus-alertbuffer'
+openshift_prometheus_alertbuffer_storage_volume_size: '10Gi'
+openshift_prometheus_alertbuffer_storage_nfs_directory: '/exports'
+openshift_prometheus_alertbuffer_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_alertbuffer_storage_access_modes:
+ - 'ReadWriteOnce'
+openshift_prometheus_alertbuffer_storage_create_pv: True
+openshift_prometheus_alertbuffer_storage_create_pvc: False
+
+
+openshift_router_selector: "region=infra"
+openshift_hosted_router_selector: "{{ openshift_router_selector }}"
+openshift_hosted_registry_selector: "{{ openshift_router_selector }}"
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 508228b2e..a10ba9310 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -11,14 +11,13 @@ import copy
import errno
import json
import re
-import io
import os
import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
-from ansible.module_utils.six import string_types, text_type
+from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser
# ignore pylint errors related to the module_utils import
@@ -86,24 +85,6 @@ def migrate_node_facts(facts):
return facts
-def migrate_hosted_facts(facts):
- """ Apply migrations for master facts """
- if 'master' in facts:
- if 'router_selector' in facts['master']:
- if 'hosted' not in facts:
- facts['hosted'] = {}
- if 'router' not in facts['hosted']:
- facts['hosted']['router'] = {}
- facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
- if 'registry_selector' in facts['master']:
- if 'hosted' not in facts:
- facts['hosted'] = {}
- if 'registry' not in facts['hosted']:
- facts['hosted']['registry'] = {}
- facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
- return facts
-
-
def migrate_admission_plugin_facts(facts):
""" Apply migrations for admission plugin facts """
if 'master' in facts:
@@ -113,8 +94,7 @@ def migrate_admission_plugin_facts(facts):
# Merge existing kube_admission_plugin_config with admission_plugin_config.
facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
facts['master']['kube_admission_plugin_config'],
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
+ additive_facts_to_overwrite=[])
# Remove kube_admission_plugin_config fact
facts['master'].pop('kube_admission_plugin_config', None)
return facts
@@ -125,7 +105,6 @@ def migrate_local_facts(facts):
migrated_facts = copy.deepcopy(facts)
migrated_facts = migrate_common_facts(migrated_facts)
migrated_facts = migrate_node_facts(migrated_facts)
- migrated_facts = migrate_hosted_facts(migrated_facts)
migrated_facts = migrate_admission_plugin_facts(migrated_facts)
return migrated_facts
@@ -412,58 +391,6 @@ def normalize_provider_facts(provider, metadata):
return facts
-# pylint: disable=too-many-branches
-def set_selectors(facts):
- """ Set selectors facts if not already present in facts dict
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the generated selectors
- facts if they were not already present
-
- """
- selector = "region=infra"
-
- if 'hosted' not in facts:
- facts['hosted'] = {}
- if 'router' not in facts['hosted']:
- facts['hosted']['router'] = {}
- if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
- facts['hosted']['router']['selector'] = selector
- if 'registry' not in facts['hosted']:
- facts['hosted']['registry'] = {}
- if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
- facts['hosted']['registry']['selector'] = selector
- if 'metrics' not in facts['hosted']:
- facts['hosted']['metrics'] = {}
- if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
- facts['hosted']['metrics']['selector'] = None
- if 'logging' not in facts or not isinstance(facts['logging'], dict):
- facts['logging'] = {}
- if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']:
- facts['logging']['selector'] = None
- if 'etcd' not in facts['hosted']:
- facts['hosted']['etcd'] = {}
- if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
- facts['hosted']['etcd']['selector'] = None
- if 'prometheus' not in facts:
- facts['prometheus'] = {}
- if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
- facts['prometheus']['selector'] = None
- if 'alertmanager' not in facts['prometheus']:
- facts['prometheus']['alertmanager'] = {}
- # pylint: disable=line-too-long
- if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
- facts['prometheus']['alertmanager']['selector'] = None
- if 'alertbuffer' not in facts['prometheus']:
- facts['prometheus']['alertbuffer'] = {}
- # pylint: disable=line-too-long
- if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
- facts['prometheus']['alertbuffer']['selector'] = None
-
- return facts
-
-
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -531,7 +458,6 @@ def set_url_facts_if_unset(facts):
etcd_urls = []
if etcd_hosts != '':
facts['master']['etcd_port'] = ports['etcd']
- facts['master']['embedded_etcd'] = False
for host in etcd_hosts:
etcd_urls.append(format_url(use_ssl['etcd'], host,
ports['etcd']))
@@ -608,63 +534,9 @@ def set_aggregate_facts(facts):
return facts
-def set_etcd_facts_if_unset(facts):
- """
- If using embedded etcd, loads the data directory from master-config.yaml.
-
- If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
-
- If anything goes wrong parsing these, the fact will not be set.
- """
- if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
- etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
-
- if 'etcd_data_dir' not in etcd_facts:
- try:
- # Parse master config to find actual etcd data dir:
- master_cfg_path = os.path.join(facts['common']['config_base'],
- 'master/master-config.yaml')
- master_cfg_f = open(master_cfg_path, 'r')
- config = yaml.safe_load(master_cfg_f.read())
- master_cfg_f.close()
-
- etcd_facts['etcd_data_dir'] = \
- config['etcdConfig']['storageDirectory']
-
- facts['etcd'] = etcd_facts
-
- # We don't want exceptions bubbling up here:
- # pylint: disable=broad-except
- except Exception:
- pass
- else:
- etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
-
- # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
- try:
- # Add a fake section for parsing:
- ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
- ini_fp = io.StringIO(ini_str)
- config = configparser.RawConfigParser()
- config.readfp(ini_fp)
- etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
- if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
- etcd_data_dir = etcd_data_dir[1:-1]
-
- etcd_facts['etcd_data_dir'] = etcd_data_dir
- facts['etcd'] = etcd_facts
-
- # We don't want exceptions bubbling up here:
- # pylint: disable=broad-except
- except Exception:
- pass
-
- return facts
-
-
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
- includes common.service_type, master.registry_url, node.registry_url,
+ includes master.registry_url, node.registry_url,
node.storage_plugin_deps
Args:
@@ -676,14 +548,6 @@ def set_deployment_facts_if_unset(facts):
# disabled to avoid breaking up facts related to deployment type into
# multiple methods for now.
# pylint: disable=too-many-statements, too-many-branches
- if 'common' in facts:
- deployment_type = facts['common']['deployment_type']
- if 'service_type' not in facts['common']:
- service_type = 'atomic-openshift'
- if deployment_type == 'origin':
- service_type = 'origin'
- facts['common']['service_type'] = service_type
-
for role in ('master', 'node'):
if role in facts:
deployment_type = facts['common']['deployment_type']
@@ -980,7 +844,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
# If we've added items to the kubelet_args dict then we need
# to merge the new items back into the main facts object.
if kubelet_args != {}:
- facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
+ facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [])
return facts
@@ -1002,7 +866,7 @@ def build_controller_args(facts):
controller_args['cloud-provider'] = ['gce']
controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if controller_args != {}:
- facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
+ facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
return facts
@@ -1024,7 +888,7 @@ def build_api_server_args(facts):
api_server_args['cloud-provider'] = ['gce']
api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if api_server_args != {}:
- facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
+ facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
return facts
@@ -1147,8 +1011,13 @@ def get_container_openshift_version(facts):
If containerized, see if we can determine the installed version via the
systemd environment files.
"""
+ deployment_type = facts['common']['deployment_type']
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
+
for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
- env_path = filename % facts['common']['service_type']
+ env_path = filename % service_type
if not os.path.exists(env_path):
continue
@@ -1211,7 +1080,7 @@ def apply_provider_facts(facts, provider_facts):
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
-def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
+def merge_facts(orig, new, additive_facts_to_overwrite):
""" Recursively merge facts dicts
Args:
@@ -1219,14 +1088,11 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
new (dict): facts to update
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: the merged facts
"""
additive_facts = ['named_certificates']
- protected_facts = ['ha']
# Facts we do not ever want to merge. These originate in inventory variables
# and contain JSON dicts. We don't ever want to trigger a merge
@@ -1258,14 +1124,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if '.' in item and item.startswith(key + '.'):
relevant_additive_facts.append(item)
- # Collect the subset of protected facts to overwrite
- # if key matches. These will be passed to the
- # subsequent merge_facts call.
- relevant_protected_facts = []
- for item in protected_facts_to_overwrite:
- if '.' in item and item.startswith(key + '.'):
- relevant_protected_facts.append(item)
- facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
+ facts[key] = merge_facts(value, new[key], relevant_additive_facts)
# Key matches an additive fact and we are not overwriting
# it so we will append the new value to the existing value.
elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
@@ -1275,18 +1134,6 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if item not in new_fact:
new_fact.append(item)
facts[key] = new_fact
- # Key matches a protected fact and we are not overwriting
- # it so we will determine if it is okay to change this
- # fact.
- elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
- # ha (bool) can not change unless it has been passed
- # as a protected fact to overwrite.
- if key == 'ha':
- if safe_get_bool(value) != safe_get_bool(new[key]):
- # pylint: disable=line-too-long
- module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405
- else:
- facts[key] = value
# No other condition has been met. Overwrite the old fact
# with the new value.
else:
@@ -1559,7 +1406,6 @@ def set_container_facts_if_unset(facts):
facts['node']['ovs_system_image'] = ovs_image
if safe_get_bool(facts['common']['is_containerized']):
- facts['common']['admin_binary'] = '/usr/local/bin/oadm'
facts['common']['client_binary'] = '/usr/local/bin/oc'
return facts
@@ -1620,8 +1466,6 @@ class OpenShiftFacts(object):
local_facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Raises:
OpenShiftFactsUnsupportedRoleError:
@@ -1631,21 +1475,13 @@ class OpenShiftFacts(object):
'cloudprovider',
'common',
'etcd',
- 'hosted',
'master',
- 'node',
- 'logging',
- 'loggingops',
- 'metrics',
- 'prometheus']
+ 'node']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
def __init__(self, role, filename, local_facts,
- additive_facts_to_overwrite=None,
- openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ additive_facts_to_overwrite=None):
self.changed = False
self.filename = filename
if role not in self.known_roles:
@@ -1667,34 +1503,23 @@ class OpenShiftFacts(object):
self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
def generate_facts(self,
local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite):
+ additive_facts_to_overwrite):
""" Generate facts
Args:
local_facts (dict): local_facts for overriding generated defaults
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- openshift_env (dict): openshift_env facts for overriding generated defaults
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: The generated facts
"""
+
local_facts = self.init_local_facts(local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
roles = local_facts.keys()
if 'common' in local_facts and 'deployment_type' in local_facts['common']:
@@ -1712,12 +1537,10 @@ class OpenShiftFacts(object):
facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts,
local_facts,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
- facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
facts = set_deployment_facts_if_unset(facts)
facts = set_sdn_facts_if_unset(facts, self.system_facts)
@@ -1727,7 +1550,6 @@ class OpenShiftFacts(object):
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
- facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
facts = set_builddefaults_facts(facts)
facts = set_buildoverrides_facts(facts)
@@ -1760,7 +1582,7 @@ class OpenShiftFacts(object):
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
- client_binary='oc', admin_binary='oadm',
+ client_binary='oc',
dns_domain='cluster.local',
config_base='/etc/origin')
@@ -1772,7 +1594,7 @@ class OpenShiftFacts(object):
console_port='8443', etcd_use_ssl=True,
etcd_hosts='', etcd_port='4001',
portal_net='172.30.0.0/16',
- embedded_etcd=True, embedded_kube=True,
+ embedded_kube=True,
embedded_dns=True,
bind_addr='0.0.0.0',
session_max_seconds=3600,
@@ -1793,178 +1615,6 @@ class OpenShiftFacts(object):
if 'cloudprovider' in roles:
defaults['cloudprovider'] = dict(kind=None)
- if 'hosted' in roles or self.role == 'hosted':
- defaults['hosted'] = dict(
- etcd=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='etcd',
- size='1Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
- registry=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='registry',
- size='5Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'),
- glusterfs=dict(
- endpoints='glusterfs-registry-endpoints',
- path='glusterfs-registry-volume',
- ips=[],
- readOnly=False,
- swap=False,
- swapcopy=True),
- host=None,
- access=dict(
- modes=['ReadWriteMany']
- ),
- create_pv=True,
- create_pvc=True
- )
- ),
- router=dict()
- )
-
- defaults['logging'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['loggingops'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es-ops',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['metrics'] = dict(
- deploy=False,
- duration=7,
- resolution='10s',
- storage=dict(
- kind=None,
- volume=dict(
- name='metrics',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['prometheus'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='prometheus',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['prometheus']['alertmanager'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='prometheus-alertmanager',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['prometheus']['alertbuffer'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='prometheus-alertbuffer',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
return defaults
def guess_host_provider(self):
@@ -2037,65 +1687,17 @@ class OpenShiftFacts(object):
)
return provider_facts
- @staticmethod
- def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
- """ Split openshift_env facts based on openshift_env structures.
-
- Args:
- openshift_env_fact (string): the openshift_env fact to split
- ex: 'openshift_cloudprovider_openstack_auth_url'
- openshift_env_structures (list): a list of structures to determine fact keys
- ex: ['openshift.cloudprovider.openstack.*']
- Returns:
- list: a list of keys that represent the fact
- ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
- """
- # By default, we'll split an openshift_env fact by underscores.
- fact_keys = openshift_env_fact.split('_')
-
- # Determine if any of the provided variable structures match the fact.
- matching_structure = None
- if openshift_env_structures is not None:
- for structure in openshift_env_structures:
- if re.match(structure, openshift_env_fact):
- matching_structure = structure
- # Fact didn't match any variable structures so return the default fact keys.
- if matching_structure is None:
- return fact_keys
-
- final_keys = []
- structure_keys = matching_structure.split('.')
- for structure_key in structure_keys:
- # Matched current key. Add to final keys.
- if structure_key == fact_keys[structure_keys.index(structure_key)]:
- final_keys.append(structure_key)
- # Wildcard means we will be taking everything from here to the end of the fact.
- elif structure_key == '*':
- final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
- # Shouldn't have gotten here, return the fact keys.
- else:
- return fact_keys
- return final_keys
-
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
# pylint: disable=too-many-branches, too-many-locals
def init_local_facts(self, facts=None,
- additive_facts_to_overwrite=None,
- openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ additive_facts_to_overwrite=None):
""" Initialize the local facts
Args:
facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- openshift_env (dict): openshift env facts to set
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
-
-
Returns:
dict: The result of merging the provided facts with existing
local facts
@@ -2107,45 +1709,13 @@ class OpenShiftFacts(object):
if facts is not None:
facts_to_set[self.role] = facts
- if openshift_env != {} and openshift_env is not None:
- for fact, value in iteritems(openshift_env):
- oo_env_facts = dict()
- current_level = oo_env_facts
- keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
-
- if len(keys) > 0 and keys[0] != self.role:
- continue
-
- # Build a dictionary from the split fact keys.
- # After this loop oo_env_facts is the resultant dictionary.
- # For example:
- # fact = "openshift_metrics_install_metrics"
- # value = 'true'
- # keys = ['metrics', 'install', 'metrics']
- # result = {'metrics': {'install': {'metrics': 'true'}}}
- for i, _ in enumerate(keys):
- # This is the last key. Set the value.
- if i == (len(keys) - 1):
- current_level[keys[i]] = value
- # This is a key other than the last key. Set as
- # dictionary and continue.
- else:
- current_level[keys[i]] = dict()
- current_level = current_level[keys[i]]
-
- facts_to_set = merge_facts(orig=facts_to_set,
- new=oo_env_facts,
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
-
local_facts = get_local_facts_from_file(self.filename)
migrated_facts = migrate_local_facts(local_facts)
new_local_facts = merge_facts(migrated_facts,
facts_to_set,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
new_local_facts = self.remove_empty_facts(new_local_facts)
@@ -2253,9 +1823,6 @@ def main():
choices=OpenShiftFacts.known_roles),
local_facts=dict(default=None, type='dict', required=False),
additive_facts_to_overwrite=dict(default=[], type='list', required=False),
- openshift_env=dict(default={}, type='dict', required=False),
- openshift_env_structures=dict(default=[], type='list', required=False),
- protected_facts_to_overwrite=dict(default=[], type='list', required=False)
),
supports_check_mode=True,
add_file_common_args=True,
@@ -2271,19 +1838,13 @@ def main():
role = module.params['role'] # noqa: F405
local_facts = module.params['local_facts'] # noqa: F405
additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
- openshift_env = module.params['openshift_env'] # noqa: F405
- openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405
- protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
openshift_facts = OpenShiftFacts(role,
fact_file,
local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
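With the `openshift_env` and protected-fact parameters removed, `merge_facts` is down to three arguments: the existing facts, the new facts, and the list of additive facts to overwrite. A minimal sketch of the resulting behavior, using illustrative sample data rather than values from the role:

```python
# Sketch only: demonstrates the simplified merge_facts(orig, new,
# additive_facts_to_overwrite) contract with made-up sample data.
defaults = {'master': {'api_port': '8443',
                       'named_certificates': [{'certfile': '/etc/origin/a.crt'}]}}
local = {'master': {'api_port': '443',
                    'named_certificates': [{'certfile': '/etc/origin/b.crt'}]}}

merged = merge_facts(defaults, local, [])
# Plain keys are overwritten:       merged['master']['api_port'] == '443'
# 'named_certificates' is additive: both certificates are kept

replaced = merge_facts(defaults, local, ['master.named_certificates'])
# Listing the fact in additive_facts_to_overwrite makes it behave like a
# plain key, so only the certificate from `local` remains.
```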
diff --git a/roles/openshift_health_checker/HOWTO_CHECKS.md b/roles/openshift_health_checker/HOWTO_CHECKS.md
index 6c5662a4e..94961f2d4 100644
--- a/roles/openshift_health_checker/HOWTO_CHECKS.md
+++ b/roles/openshift_health_checker/HOWTO_CHECKS.md
@@ -12,7 +12,7 @@ Checks are typically implemented as two parts:
The checks are called from Ansible playbooks via the `openshift_health_check`
action plugin. See
-[playbooks/byo/openshift-preflight/check.yml](../../playbooks/byo/openshift-preflight/check.yml)
+[playbooks/openshift-checks/pre-install.yml](../../playbooks/openshift-checks/pre-install.yml)
for an example.
The action plugin dynamically discovers all checks and executes only those
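For orientation, the referenced playbook drives the checks roughly as in the sketch below; the host group and the list of check names are illustrative assumptions, and the plugin is assumed to take a `checks` argument:

```yaml
# Hedged sketch of invoking the openshift_health_check action plugin;
# group name and check names are illustrative, not copied from the playbook.
- name: Run OpenShift pre-install checks
  hosts: oo_all_hosts
  roles:
    - openshift_health_checker
  post_tasks:
    - name: Run health checks
      action: openshift_health_check
      args:
        checks:
          - memory_availability
          - package_availability
          - package_version
```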
diff --git a/roles/openshift_health_checker/defaults/main.yml b/roles/openshift_health_checker/defaults/main.yml
new file mode 100644
index 000000000..f25a0dc79
--- /dev/null
+++ b/roles/openshift_health_checker/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
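The new default simply indexes the dictionary with `openshift_deployment_type`, so the service type no longer needs to be carried in `openshift.common`. Illustrative resolution:

```yaml
# Given this inventory variable...
openshift_deployment_type: openshift-enterprise
# ...the templated default resolves to:
#   openshift_service_type == "atomic-openshift"
# and for openshift_deployment_type: origin it resolves to "origin".
```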
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 090e438ff..980e23f27 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -15,7 +15,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum"
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
group_names = self.get_var("group_names", default=[])
packages = set()
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 13a91dadf..f3a628e28 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -41,7 +41,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
return super(PackageVersion, self).is_active() and master_or_node
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
openshift_release = self.get_var("openshift_release", default='')
deployment_type = self.get_var("openshift_deployment_type")
check_multi_minor_release = deployment_type in ['openshift-enterprise']
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index ec46c3b4b..fc333dfd4 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -8,12 +8,12 @@ def task_vars():
return dict(
openshift=dict(
common=dict(
- service_type='origin',
is_containerized=False,
is_atomic=False,
),
docker=dict(),
),
+ openshift_service_type='origin',
openshift_deployment_type='origin',
openshift_image_tag='',
group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index dd6f4ad81..a29dc166b 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -37,8 +37,9 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
task_vars = dict(
group_names=group_names,
openshift=dict(
- common=dict(service_type="origin", is_containerized=False),
- )
+ common=dict(is_containerized=False),
+ ),
+ openshift_service_type="origin"
)
result = EtcdTraffic(execute_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 6f0457549..dd98ff4d8 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -10,10 +10,11 @@ def test_openshift_version_not_supported():
openshift_release = '111.7.0'
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -27,9 +28,10 @@ def test_invalid_openshift_release_format():
return {}
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_image_tag='v0',
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -47,9 +49,10 @@ def test_invalid_openshift_release_format():
])
def test_ovs_package_version(openshift_release, expected_ovs_version):
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
+ openshift_service_type='origin'
)
return_value = {} # note: check.execute_module modifies return hash contents
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 9815acb38..a1e6e0879 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -19,13 +19,13 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
(
- dict(openshift=dict(common=dict(service_type='openshift'))),
+ dict(openshift_service_type='origin'),
set(),
set(['openshift-master', 'openshift-node']),
),
(
dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
group_names=['oo_masters_to_config'],
),
set(['origin-master']),
@@ -33,7 +33,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_nodes_to_config'],
),
set(['atomic-openshift-node']),
@@ -41,7 +41,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_masters_to_config', 'oo_nodes_to_config'],
),
set(['atomic-openshift-master', 'atomic-openshift-node']),
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 3cf4ce033..ea8e02b97 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -4,9 +4,12 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
def task_vars_for(openshift_release, deployment_type):
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
return dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type=deployment_type)),
+ openshift_service_type=service_type,
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type=deployment_type,
@@ -29,7 +32,7 @@ def test_openshift_version_not_supported():
def test_invalid_openshift_release_format():
task_vars = dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
openshift_image_tag='v0',
openshift_deployment_type='origin',
)
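A quick illustration of what the updated helper yields for an enterprise deployment (the assertions below are for illustration only, not additional test cases):

```python
# Illustrative use of the updated helper from package_version_test.py.
vars_ = task_vars_for('3.7.0', 'openshift-enterprise')
assert vars_['openshift_service_type'] == 'atomic-openshift'
assert vars_['openshift_deployment_type'] == 'openshift-enterprise'
assert vars_['openshift_image_tag'] == 'v3.7.0'
```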
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index a1c2c3956..3cf5d97c5 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -43,9 +43,9 @@ variables also control configuration behavior:
**NOTE:** Configuring a value for
`openshift_hosted_registry_storage_glusterfs_ips` with a `glusterfs_registry`
-host group is not allowed. Specifying a `glusterfs_registry` host group
-indicates that a new GlusterFS cluster should be configured, whereas
-specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting
+host group is not allowed. Specifying a `glusterfs_registry` host group
+indicates that a new GlusterFS cluster should be configured, whereas
+specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting
to use a pre-configured GlusterFS cluster for the registry storage.
_
@@ -53,7 +53,6 @@ _
Dependencies
------------
-* openshift_hosted_facts
* openshift_persistent_volumes
Example Playbook
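For the registry-storage note above, a hedged sketch of the supported inventory shape when pointing at a pre-configured GlusterFS cluster; the addresses are placeholders:

```yaml
# Illustrative OSEv3 group variables; IPs are placeholders.
openshift_hosted_registry_storage_kind: glusterfs
openshift_hosted_registry_storage_glusterfs_ips:
  - 192.0.2.10
  - 192.0.2.11
  - 192.0.2.12
# Do not also define a glusterfs_registry host group in this case; that
# group requests a new GlusterFS cluster instead of reusing this one.
```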
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index e70c0c420..b6501d288 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -27,6 +27,9 @@ openshift_cluster_domain: 'cluster.local'
r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
+openshift_hosted_router_namespace: 'default'
+
openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
openshift_hosted_router_edits:
@@ -40,13 +43,14 @@ openshift_hosted_router_edits:
value: 21600
action: put
+openshift_hosted_router_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
openshift_hosted_routers:
- name: router
replicas: "{{ replicas | default(1) }}"
namespace: default
serviceaccount: router
selector: "{{ openshift_hosted_router_selector | default(None) }}"
- images: "{{ openshift_hosted_router_image | default(None) }}"
+ images: "{{ openshift_hosted_router_registryurl }}"
edits: "{{ openshift_hosted_router_edits }}"
stats_port: 1936
ports:
@@ -64,6 +68,11 @@ r_openshift_hosted_router_os_firewall_allow: []
# Registry #
############
+openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
+openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
+openshift_hosted_registry_routecertificates: {}
+openshift_hosted_registry_routetermination: "passthrough"
+
r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
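Both the router and registry image defaults are now looked up from `openshift_hosted_images_dict[openshift_deployment_type]`. The dictionary itself is defined elsewhere in the role's defaults; its assumed shape, inferred from the `openshift3/ose-${component}:${version}` fallback removed from registry.yml, is roughly:

```yaml
# Assumed shape of openshift_hosted_images_dict (illustrative):
openshift_hosted_images_dict:
  origin: 'openshift/origin-${component}:${version}'
  openshift-enterprise: 'openshift3/ose-${component}:${version}'
```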
diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
index 7f41529ac..003ce5f9e 100644
--- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
+++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
@@ -12,7 +12,7 @@ class FilterModule(object):
def get_router_replicas(replicas=None, router_nodes=None):
''' This function will return the number of replicas
based on the results from the defined
- openshift.hosted.router.replicas OR
+ openshift_hosted_router_replicas OR
the query from oc_obj on openshift nodes with a selector OR
default to 1
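The rename does not change the filter's decision order. A simplified Python sketch of that order (the real filter validates the oc_obj result structure more defensively):

```python
def get_router_replicas_sketch(replicas=None, router_nodes=None):
    """Simplified sketch of the replica-selection order described above."""
    # 1. An explicit openshift_hosted_router_replicas value wins.
    if replicas:
        return int(replicas)
    # 2. Otherwise count the nodes returned by the oc_obj query
    #    against the router selector.
    if router_nodes:
        results = router_nodes.get('results', {}).get('results', [])
        if results and 'items' in results[0]:
            return len(results[0]['items'])
    # 3. Fall back to a single replica.
    return 1
```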
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 1d70ef7eb..ac9e241a5 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -12,6 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_hosted_facts
+- role: openshift_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index e2e06594b..429f0c514 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -6,20 +6,20 @@
check_mode: no
- name: setup firewall
- include: firewall.yml
+ import_tasks: firewall.yml
vars:
l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_registry_firewall_enabled }}"
l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_registry_use_firewalld }}"
l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
-- when: openshift.hosted.registry.replicas | default(none) is none
+- when: openshift_hosted_registry_replicas | default(none) is none
block:
- name: Retrieve list of openshift nodes matching registry selector
oc_obj:
state: list
kind: node
- selector: "{{ openshift.hosted.registry.selector | default(omit) }}"
+ selector: "{{ openshift_hosted_registry_selector }}"
register: registry_nodes
- name: set_fact l_node_count to number of nodes matching registry selector
@@ -39,16 +39,13 @@
# just 1:
- name: set_fact l_default_replicas when l_node_count > 0
set_fact:
- l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}"
+ l_default_replicas: "{{ l_node_count if openshift_hosted_registry_storage_kind | default(none) is not none else 1 }}"
when: l_node_count | int > 0
- name: set openshift_hosted facts
set_fact:
- openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
- openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
- openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}"
- openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}"
- openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift.hosted.registry.storage.glusterfs.ips }}{% endif %}"
+ # This determines the gluster_ips to use for the registry by looping over the glusterfs_registry group
+ openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift_hosted_registry_storage_glusterfs_ips }}{% endif %}"
- name: Update registry environment variables when pushing via dns
set_fact:
@@ -97,16 +94,14 @@
service_type: ClusterIP
clusterip: '{{ openshift_hosted_registry_clusterip | default(omit) }}'
-- include: secure.yml
- static: no
+- include_tasks: secure.yml
run_once: true
when:
- not (openshift_docker_hosted_registry_insecure | default(False)) | bool
-- include: storage/object_storage.yml
- static: no
+- include_tasks: storage/object_storage.yml
when:
- - openshift.hosted.registry.storage.kind | default(none) == 'object'
+ - openshift_hosted_registry_storage_kind | default(none) == 'object'
- name: Update openshift_hosted facts for persistent volumes
set_fact:
@@ -115,23 +110,23 @@
pvc_volume_mounts:
- name: registry-storage
type: persistentVolumeClaim
- claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-claim"
when:
- - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
+ - openshift_hosted_registry_storage_kind | default(none) in ['nfs', 'openstack', 'glusterfs']
-- include: storage/glusterfs_endpoints.yml
+- include_tasks: storage/glusterfs_endpoints.yml
when:
- openshift_hosted_registry_storage_glusterfs_ips|length > 0
- - openshift.hosted.registry.storage.kind | default(none) in ['glusterfs']
+ - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs']
- name: Create OpenShift registry
oc_adm_registry:
name: "{{ openshift_hosted_registry_name }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
selector: "{{ openshift_hosted_registry_selector }}"
- replicas: "{{ openshift_hosted_registry_replicas }}"
+ replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}"
service_account: "{{ openshift_hosted_registry_serviceaccount }}"
- images: "{{ openshift_hosted_registry_images }}"
+ images: "{{ openshift_hosted_registry_registryurl }}"
env_vars: "{{ openshift_hosted_registry_env_vars }}"
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
@@ -144,14 +139,14 @@
namespace: "{{ openshift_hosted_registry_namespace }}"
- name: Wait for pod (Registry)
- include: wait_for_pod.yml
+ include_tasks: wait_for_pod.yml
vars:
l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
-- include: storage/glusterfs.yml
+- include_tasks: storage/glusterfs.yml
when:
- - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap
+ - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap
- name: Delete temp directory
file:
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index dd7053656..4e9219477 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -1,6 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
+ import_tasks: firewall.yml
vars:
l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_router_firewall_enabled }}"
l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_router_use_firewalld }}"
@@ -11,16 +11,14 @@
oc_obj:
state: list
kind: node
- namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
- selector: "{{ openshift.hosted.router.selector | default(omit) }}"
+ namespace: "{{ openshift_hosted_router_namespace }}"
+ selector: "{{ openshift_hosted_router_selector }}"
register: router_nodes
- when: openshift.hosted.router.replicas | default(none) is none
+ when: openshift_hosted_router_replicas | default(none) is none
- name: set_fact replicas
set_fact:
- replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}"
- openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
- openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+ replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}"
- name: Get the certificate contents for router
copy:
@@ -42,8 +40,8 @@
signer_key: "{{ openshift_master_config_dir }}/ca.key"
signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
hostnames:
- - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
- - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
+ - "{{ openshift_master_default_subdomain }}"
+ - "*.{{ openshift_master_default_subdomain }}"
cert: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
key: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
with_items: "{{ openshift_hosted_routers }}"
@@ -102,7 +100,7 @@
with_items: "{{ openshift_hosted_routers }}"
- name: Wait for pod (Routers)
- include: wait_for_pod.yml
+ include_tasks: wait_for_pod.yml
vars:
l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
diff --git a/roles/openshift_hosted/tasks/secure.yml b/roles/openshift_hosted/tasks/secure.yml
index 174bc39a4..378ae32dc 100644
--- a/roles/openshift_hosted/tasks/secure.yml
+++ b/roles/openshift_hosted/tasks/secure.yml
@@ -1,18 +1,10 @@
---
-- name: Configure facts for docker-registry
- set_fact:
- openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
- openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
- openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
-
- name: Include reencrypt route configuration
- include: secure/reencrypt.yml
- static: no
+ include_tasks: secure/reencrypt.yml
when: openshift_hosted_registry_routetermination == 'reencrypt'
- name: Include passthrough route configuration
- include: secure/passthrough.yml
- static: no
+ include_tasks: secure/passthrough.yml
when: openshift_hosted_registry_routetermination == 'passthrough'
- name: Fetch the docker-registry route
@@ -39,7 +31,7 @@
- "{{ docker_registry_route.results[0].spec.host }}"
- "{{ openshift_hosted_registry_name }}.default.svc"
- "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}"
- - "{{ openshift_hosted_registry_routehost }}"
+ - "{{ openshift_hosted_registry_routehost | default(omit) }}"
cert: "{{ docker_registry_cert_path }}"
key: "{{ docker_registry_key_path }}"
expire_days: "{{ openshift_hosted_registry_cert_expire_days }}"
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index 7cae67baa..18b2edcc6 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -17,7 +17,7 @@
until:
- "registry_pods.results.results[0]['items'] | count > 0"
# There must be as many matching pods with 'Ready' status True as there are expected replicas
- - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int"
+ - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int"
delay: 10
retries: "{{ (600 / 10) | int }}"
@@ -35,7 +35,7 @@
mount:
state: mounted
fstype: glusterfs
- src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}"
name: "{{ mktemp.stdout }}"
- name: Set registry volume permissions
@@ -60,7 +60,7 @@
- name: Copy current registry contents to new GlusterFS volume
command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/"
- when: openshift.hosted.registry.storage.glusterfs.swapcopy
+ when: openshift_hosted_registry_storage_glusterfs_swapcopy
- name: Swap new GlusterFS registry volume
oc_volume:
@@ -68,7 +68,7 @@
name: "{{ openshift_hosted_registry_name }}"
vol_name: registry-storage
mount_type: pvc
- claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+ claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
- name: Deactivate registry maintenance mode
oc_env:
@@ -77,7 +77,7 @@
state: absent
env_vars:
- REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
- when: openshift.hosted.registry.storage.glusterfs.swap
+ when: openshift_hosted_registry_storage_glusterfs_swap
- name: Unmount registry volume and clean up mount point/fstab
mount:
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
index 0f4381748..bd7181c17 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -10,7 +10,7 @@
dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- name: Create GlusterFS registry service and endpoint
- command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift.hosted.registry.namespace | default('default') }}"
+ command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
with_items:
- "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
diff --git a/roles/openshift_hosted/tasks/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml
index 8553a8098..a8c26fb51 100644
--- a/roles/openshift_hosted/tasks/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/storage/object_storage.yml
@@ -1,6 +1,6 @@
---
-- include: s3.yml
- when: openshift.hosted.registry.storage.provider == 's3'
+- include_tasks: s3.yml
+ when: openshift_hosted_registry_storage_provider == 's3'
- name: Ensure the registry secret exists
oc_secret:
diff --git a/roles/openshift_hosted/tasks/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml
index 8e905d905..4c100ee4e 100644
--- a/roles/openshift_hosted/tasks/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/storage/s3.yml
@@ -2,8 +2,8 @@
- name: Assert that S3 variables are provided for registry_config template
assert:
that:
- - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
- - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
+ - openshift_hosted_registry_storage_s3_bucket | default(none) is not none
+ - openshift_hosted_registry_storage_s3_region | default(none) is not none
msg: |
When using S3 storage, the following variables are required:
openshift_hosted_registry_storage_s3_bucket
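The corrected assertion now checks the region alongside the bucket. A hedged sketch of the inventory variables involved; the access/secret key names are assumptions and every value is a placeholder:

```yaml
# Illustrative S3 registry storage variables; values are placeholders and
# the accesskey/secretkey names are assumptions, not taken from this patch.
openshift_hosted_registry_storage_kind: object
openshift_hosted_registry_storage_provider: s3
openshift_hosted_registry_storage_s3_bucket: my-registry-bucket
openshift_hosted_registry_storage_s3_region: us-east-1
openshift_hosted_registry_storage_s3_accesskey: AKIAEXAMPLE
openshift_hosted_registry_storage_s3_secretkey: examplesecret
```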
diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
index 607d25533..3c874d910 100644
--- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Endpoints
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
subsets:
- addresses:
{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
index 452c7c3e1..f18c94a4f 100644
--- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
+++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
spec:
ports:
- port: 1
diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
index 607d25533..3c874d910 100644
--- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Endpoints
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
subsets:
- addresses:
{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
index 452c7c3e1..f18c94a4f 100644
--- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
+++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
spec:
ports:
- port: 1
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_hosted_facts/meta/main.yml b/roles/openshift_hosted_facts/meta/main.yml
deleted file mode 100644
index dd2de07bc..000000000
--- a/roles/openshift_hosted_facts/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Andrew Butcher
- description: OpenShift Hosted Facts
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
deleted file mode 100644
index 8fc70cecb..000000000
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-# openshift_*_selector variables have been deprecated in favor of
-# openshift_hosted_*_selector variables.
-- set_fact:
- openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
- when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined
-- set_fact:
- openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
- when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined
-
-- name: Set hosted facts
- openshift_facts:
- role: "{{ item }}"
- openshift_env: "{{ hostvars
- | oo_merge_hostvars(vars, inventory_hostname)
- | oo_openshift_env }}"
- openshift_env_structures:
- - 'openshift.hosted.router.*'
- with_items: [hosted, logging, loggingops, metrics, prometheus]
diff --git a/roles/openshift_hosted_metrics/README.md b/roles/openshift_hosted_metrics/README.md
deleted file mode 100644
index c2af3c494..000000000
--- a/roles/openshift_hosted_metrics/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-OpenShift Metrics with Hawkular
-====================
-
-OpenShift Metrics Installation
-
-Requirements
-------------
-
-* Ansible 2.2
-* It requires subdomain fqdn to be set.
-* If persistence is enabled, then it also requires NFS.
-
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value | |
-|-------------------------------------------------|-----------------------|-------------------------------------------------------------|
-| openshift_hosted_metrics_deploy | `False` | If metrics should be deployed |
-| openshift_hosted_metrics_public_url | null | Hawkular metrics public url |
-| openshift_hosted_metrics_storage_nfs_directory | `/exports` | Root export directory. |
-| openshift_hosted_metrics_storage_volume_name | `metrics` | Metrics volume within openshift_hosted_metrics_volume_dir |
-| openshift_hosted_metrics_storage_volume_size | `10Gi` | Metrics volume size |
-| openshift_hosted_metrics_storage_nfs_options | `*(rw,root_squash)` | NFS options for configured exports. |
-| openshift_hosted_metrics_duration | `7` | Metrics query duration |
-| openshift_hosted_metrics_resolution | `10s` | Metrics resolution |
-
-
-Dependencies
-------------
-openshift_facts
-openshift_examples
-openshift_master_facts
-
-Example Playbook
-----------------
-
-```
-- name: Configure openshift-metrics
- hosts: oo_first_master
- roles:
- - role: openshift_hosted_metrics
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jose David Martín (j.david.nieto@gmail.com)
diff --git a/roles/openshift_hosted_metrics/defaults/main.yml b/roles/openshift_hosted_metrics/defaults/main.yml
deleted file mode 100644
index a01f24df8..000000000
--- a/roles/openshift_hosted_metrics/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
deleted file mode 100644
index 074b72942..000000000
--- a/roles/openshift_hosted_metrics/handlers/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- notify: Verify API Server
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
- retries: 3
- delay: 5
- register: result
- until: result.rc == 0
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
diff --git a/roles/openshift_hosted_metrics/meta/main.yaml b/roles/openshift_hosted_metrics/meta/main.yaml
deleted file mode 100644
index debca3ca6..000000000
--- a/roles/openshift_hosted_metrics/meta/main.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-galaxy_info:
- author: David Martín
- description:
- company:
- license: Apache License, Version 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- { role: openshift_examples }
-- { role: openshift_facts }
-- { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
deleted file mode 100644
index 15dd1bd54..000000000
--- a/roles/openshift_hosted_metrics/tasks/install.yml
+++ /dev/null
@@ -1,132 +0,0 @@
----
-
-- name: Test if metrics-deployer service account exists
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace=openshift-infra
- get serviceaccount metrics-deployer -o json
- register: serviceaccount
- changed_when: false
- failed_when: false
-
-- name: Create metrics-deployer Service Account
- shell: >
- echo {{ metrics_deployer_sa | to_json | quote }} |
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- create -f -
- when: serviceaccount.rc == 1
-
-- name: Test edit permissions
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
- register: edit_rolebindings
- changed_when: false
-
-- name: Add edit permission to the openshift-infra project to metrics-deployer SA
- command: >
- {{ openshift.common.client_binary }} adm
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- policy add-role-to-user edit
- system:serviceaccount:openshift-infra:metrics-deployer
- when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout"
-
-- name: Test hawkular view permissions
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
- register: view_rolebindings
- changed_when: false
-
-- name: Add view permissions to hawkular SA
- command: >
- {{ openshift.common.client_binary }} adm
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- policy add-role-to-user view
- system:serviceaccount:openshift-infra:hawkular
- when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings"
-
-- name: Test cluster-reader permissions
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
- register: cluster_reader_clusterrolebindings
- changed_when: false
-
-- name: Add cluster-reader permission to the openshift-infra project to heapster SA
- command: >
- {{ openshift.common.client_binary }} adm
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- policy add-cluster-role-to-user cluster-reader
- system:serviceaccount:openshift-infra:heapster
- when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout"
-
-- name: Create metrics-deployer secret
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- secrets new metrics-deployer nothing=/dev/null
- register: metrics_deployer_secret
- changed_when: metrics_deployer_secret.rc == 0
- failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
-
-# TODO: extend this to allow user passed in certs or generating cert with
-# OpenShift CA
-- name: Build metrics deployer command
- set_fact:
- deployer_cmd: "{{ openshift.common.client_binary }} process -f \
- {{ hosted_base }}/metrics-deployer.yaml -v \
- HAWKULAR_METRICS_HOSTNAME={{ g_metrics_hostname }} \
- -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \
- -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \
- -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \
- -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}
- {{ image_prefix }} \
- {{ image_version }} \
- -v MODE={{ deployment_mode }} \
- | {{ openshift.common.client_binary }} --namespace openshift-infra \
- --config={{ openshift_hosted_metrics_kubeconfig }} \
- create -o name -f -"
-
-- name: Deploy Metrics
- shell: "{{ deployer_cmd }}"
- register: deploy_metrics
- failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0"
- changed_when: deploy_metrics.rc == 0
-
-- set_fact:
- deployer_pod: "{{ deploy_metrics.stdout[1:2] }}"
-
-# TODO: re-enable this once the metrics deployer validation issue is fixed
-# when using dynamically provisioned volumes
-- name: "Wait for image pull and deployer pod"
- shell: >
- {{ openshift.common.client_binary }}
- --namespace openshift-infra
- --config={{ openshift_hosted_metrics_kubeconfig }}
- get {{ deploy_metrics.stdout }}
- register: deploy_result
- until: "{{ 'Completed' in deploy_result.stdout }}"
- failed_when: False
- retries: 60
- delay: 10
-
-- name: Configure master for metrics
- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: assetConfig.metricsPublicURL
- yaml_value: "{{ openshift_hosted_metrics_deploy_url }}"
- notify: restart master
diff --git a/roles/openshift_hosted_metrics/tasks/main.yaml b/roles/openshift_hosted_metrics/tasks/main.yaml
deleted file mode 100644
index 5ce8aa92b..000000000
--- a/roles/openshift_hosted_metrics/tasks/main.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Record kubeconfig tmp dir
- set_fact:
- openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }}
- changed_when: False
-
-- name: Set hosted metrics facts
- openshift_facts:
- role: hosted
- openshift_env: "{{ hostvars
- | oo_merge_hostvars(vars, inventory_hostname)
- | oo_openshift_env }}"
- openshift_env_structures:
- - 'openshift.hosted.metrics.*'
-
-- set_fact:
- metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
- metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
- metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
- image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}"
- image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}"
-
-
-- name: Check for existing metrics pods
- shell: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get pods -l {{ item }} | grep -q Running
- register: metrics_pods_status
- with_items:
- - metrics-infra=hawkular-metrics
- - metrics-infra=heapster
- - metrics-infra=hawkular-cassandra
- failed_when: false
- changed_when: false
-
-- name: Check for previous deployer
- shell: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
- register: metrics_deployer_status
- failed_when: false
- changed_when: false
-
-- name: Record current deployment status
- set_fact:
- greenfield: "{{ not metrics_deployer_status.rc == 0 }}"
- failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}"
- metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}"
-
-- name: Set deployment mode
- set_fact:
- deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}"
-
-# TODO: handle non greenfield deployments in the future
-- include: install.yml
- when: greenfield
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_hosted_metrics/vars/main.yaml b/roles/openshift_hosted_metrics/vars/main.yaml
deleted file mode 100644
index 6c207d6ac..000000000
--- a/roles/openshift_hosted_metrics/vars/main.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-hawkular_permission_oc_commands:
- - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
- - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
-
-metrics_deployer_sa:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: metrics-deployer
- secrets:
- - name: metrics-deployer
-
-
-hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
-
-hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
-
-hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
-
-metrics_upgrade: openshift.hosted.metrics.upgrade | default(False)
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 57121447d..0343a7eb0 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -5,7 +5,7 @@ PartOf={{ openshift_docker_service_name }}.service
[Service]
ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
-ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
+ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop openshift_loadbalancer
LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }}
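Editor's note on the `ExecStart` change above: each frontend bind such as `*:8443` is run through `regex_replace` twice to strip the address portion and keep only the port, which then becomes the container's `-p PORT:PORT` publish argument. A minimal standalone sketch of that filter (not part of the patch; the bind value is illustrative):

```
- hosts: localhost
  gather_facts: false
  vars:
    bind: "*:8443"   # one entry from a frontend's `binds` list (illustrative)
  tasks:
    - name: Show the publish argument the template would render for this bind
      debug:
        msg: "-p {{ bind | regex_replace('^[^:]*:(\\d+).*$', '\\1') }}:{{ bind | regex_replace('^[^:]*:(\\d+).*$', '\\1') }}"
```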
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 497c6e0c5..2f1aa061f 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -28,7 +28,7 @@ openshift_logging_curator_ops_memory_limit: 256Mi
openshift_logging_curator_ops_cpu_request: 100m
openshift_logging_curator_ops_nodeselector: {}
-openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_hostname: "{{ 'kibana.' ~ openshift_master_default_subdomain }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_cpu_request: 100m
@@ -54,7 +54,7 @@ openshift_logging_kibana_key: ""
#for the public facing kibana certs
openshift_logging_kibana_ca: ""
-openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ openshift_master_default_subdomain }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_cpu_request: 100m
@@ -109,7 +109,7 @@ openshift_logging_es_config: {}
# for exposing es to external (outside of the cluster) clients
openshift_logging_es_allow_external: False
-openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_hostname: "{{ 'es.' ~ openshift_master_default_subdomain }}"
#The absolute path on the control node to the cert file to use
#for the public facing es certs
@@ -145,7 +145,7 @@ openshift_logging_es_ops_nodeselector: {}
# for exposing es-ops to external (outside of the cluster) clients
openshift_logging_es_ops_allow_external: False
-openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ openshift_master_default_subdomain }}"
#The absolute path on the control node to the cert file to use
#for the public facing es-ops certs
@@ -165,7 +165,7 @@ openshift_logging_storage_access_modes: ['ReadWriteOnce']
# mux - secure_forward listener service
openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain}}"
openshift_logging_mux_port: 24284
openshift_logging_mux_cpu_limit: null
openshift_logging_mux_memory_limit: 512Mi
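Editor's note: the hostname defaults above drop the `| default('router.default.svc.cluster.local', true)` fallback, so a missing `openshift_master_default_subdomain` now fails rendering instead of silently producing an unreachable route host. A minimal sketch of the behaviour difference (the play is illustrative; only the variable name is real, and the second argument `true` to `default` also catches an empty string):

```
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Old behaviour - fall back when the subdomain is unset or empty
      debug:
        msg: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
    - name: New behaviour - error out unless openshift_master_default_subdomain is defined
      debug:
        msg: "{{ 'kibana.' ~ openshift_master_default_subdomain }}"
```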
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index 074b72942..1f4b5a116 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -1,17 +1,17 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
+ when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ when: (not (master_controllers_service_status_changed | default(false) | bool))
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index f526fd734..082c0128f 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -36,7 +36,7 @@
- top_dir: '{{generated_certs_dir}}'
when: not signing_conf_file.stat.exists
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -45,7 +45,7 @@
- procure_component: kibana-internal
hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -53,14 +53,14 @@
hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
when: openshift_logging_use_mux | bool
-- include: procure_shared_key.yaml
+- include_tasks: procure_shared_key.yaml
loop_control:
loop_var: shared_key_info
with_items:
- procure_component: mux
when: openshift_logging_use_mux | bool
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -68,7 +68,7 @@
hostnames: "es, {{openshift_logging_es_hostname}}"
when: openshift_logging_es_allow_external | bool
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -109,7 +109,7 @@
- not ca_cert_srl_file.stat.exists
- name: Generate PEM certs
- include: generate_pems.yaml component={{node_name}}
+ include_tasks: generate_pems.yaml component={{node_name}}
with_items:
- system.logging.fluentd
- system.logging.kibana
@@ -119,7 +119,7 @@
loop_var: node_name
- name: Generate PEM cert for mux
- include: generate_pems.yaml component={{node_name}}
+ include_tasks: generate_pems.yaml component={{node_name}}
with_items:
- system.logging.mux
loop_control:
@@ -127,7 +127,7 @@
when: openshift_logging_use_mux | bool
- name: Generate PEM cert for Elasticsearch external route
- include: generate_pems.yaml component={{node_name}}
+ include_tasks: generate_pems.yaml component={{node_name}}
with_items:
- system.logging.es
loop_control:
@@ -135,7 +135,7 @@
when: openshift_logging_es_allow_external | bool
- name: Creating necessary JKS certs
- include: generate_jks.yaml
+ include_tasks: generate_jks.yaml
# TODO: make idempotent
- name: Generate proxy session
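Editor's note: the repeated `include` → `include_tasks` swaps in this role track the Ansible 2.4 split of the deprecated bare `include` into static and dynamic forms. A minimal sketch of the distinction (file names illustrative):

```
# import_tasks is static: the file is read at parse time and is always loaded.
- import_tasks: always_loaded.yaml

# include_tasks is dynamic: it is evaluated at run time, so it can sit behind
# conditionals and loops, as the cert-procurement includes above do.
- include_tasks: procure_server_certs.yaml
  when: openshift_logging_es_allow_external | bool
```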
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 2fefdc894..bb8ebec6b 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -52,7 +52,7 @@
changed_when: False
check_mode: no
-- include: generate_certs.yaml
+- include_tasks: generate_certs.yaml
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -250,7 +250,7 @@
when:
- openshift_logging_use_ops | bool
-- include: annotate_ops_projects.yaml
+- include_tasks: annotate_ops_projects.yaml
## Curator
- include_role:
@@ -311,4 +311,4 @@
openshift_logging_install_eventrouter | default(false) | bool
-- include: update_master_config.yaml
+- include_tasks: update_master_config.yaml
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 2571548d6..9949bb95d 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -19,11 +19,11 @@
check_mode: no
become: no
-- include: install_logging.yaml
+- include_tasks: install_logging.yaml
when:
- openshift_logging_install_logging | default(false) | bool
-- include: delete_logging.yaml
+- include_tasks: delete_logging.yaml
when:
- not openshift_logging_install_logging | default(false) | bool
diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml
index 6752fb7f9..d4635aab0 100644
--- a/roles/openshift_logging_curator/meta/main.yaml
+++ b/roles/openshift_logging_curator/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 7ddf57450..e7ef5ff22 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -12,7 +12,7 @@
openshift_logging_curator_image_prefix: "{{ openshift_logging_curator_image_prefix | default(__openshift_logging_curator_image_prefix) }}"
openshift_logging_curator_image_version: "{{ openshift_logging_curator_image_version | default(__openshift_logging_curator_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
index 097270772..6a9a6539c 100644
--- a/roles/openshift_logging_elasticsearch/meta/main.yaml
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 770892d52..8f2050043 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -30,7 +30,7 @@
openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_elasticsearch_image_prefix | default(__openshift_logging_elasticsearch_image_prefix) }}"
openshift_logging_elasticsearch_image_version: "{{ openshift_logging_elasticsearch_image_version | default(__openshift_logging_elasticsearch_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 0bfa9e85b..bf04094a3 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -9,6 +9,7 @@ metadata:
logging-infra: "{{logging_component}}"
spec:
replicas: {{es_replicas|default(1)}}
+ revisionHistoryLimit: 0
selector:
provider: openshift
component: "{{component}}"
diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml
index b1f93eeb9..96b181d61 100644
--- a/roles/openshift_logging_eventrouter/tasks/main.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/main.yaml
@@ -12,8 +12,8 @@
openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_eventrouter_image_prefix | default(__openshift_logging_eventrouter_image_prefix) }}"
openshift_logging_eventrouter_image_version: "{{ openshift_logging_eventrouter_image_version | default(__openshift_logging_eventrouter_image_version) }}"
-- include: "{{ role_path }}/tasks/install_eventrouter.yaml"
+- include_tasks: install_eventrouter.yaml
when: openshift_logging_install_eventrouter | default(false) | bool
-- include: "{{ role_path }}/tasks/delete_eventrouter.yaml"
+- include_tasks: delete_eventrouter.yaml
when: not openshift_logging_install_eventrouter | default(false) | bool
diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml
index 2003aacb2..89c98204f 100644
--- a/roles/openshift_logging_fluentd/meta/main.yaml
+++ b/roles/openshift_logging_fluentd/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index f8683ab75..87eedfb4b 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -47,7 +47,7 @@
openshift_logging_fluentd_image_prefix: "{{ openshift_logging_fluentd_image_prefix | default(__openshift_logging_fluentd_image_prefix) }}"
openshift_logging_fluentd_image_version: "{{ openshift_logging_fluentd_image_version | default(__openshift_logging_fluentd_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
@@ -216,7 +216,7 @@
openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
when: "'--all' in openshift_logging_fluentd_hosts"
-- include: label_and_wait.yaml
+- include_tasks: label_and_wait.yaml
vars:
node: "{{ fluentd_host }}"
with_items: "{{ openshift_logging_fluentd_hosts }}"
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index 6cdf7c8f3..899193838 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -10,7 +10,7 @@ openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_memory_limit: 736Mi
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
openshift_logging_kibana_es_host: "logging-es"
openshift_logging_kibana_es_port: 9200
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
index 89e08abc0..d97586a37 100644
--- a/roles/openshift_logging_kibana/meta/main.yaml
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 9d99114c5..77bf8042a 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -15,7 +15,7 @@
openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_kibana_proxy_image_prefix | default(__openshift_logging_kibana_proxy_image_prefix) }}"
openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_kibana_proxy_image_version | default(__openshift_logging_kibana_proxy_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index cd15da939..1e6c501bf 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -28,7 +28,7 @@ openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journ
openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
openshift_logging_mux_port: 24284
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml
index f40beb79d..f271d8d7d 100644
--- a/roles/openshift_logging_mux/meta/main.yaml
+++ b/roles/openshift_logging_mux/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 242d92188..68948bce2 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -20,7 +20,7 @@
openshift_logging_mux_image_prefix: "{{ openshift_logging_mux_image_prefix | default(__openshift_logging_mux_image_prefix) }}"
openshift_logging_mux_image_version: "{{ openshift_logging_mux_image_version | default(__openshift_logging_mux_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 96de82669..974d9781a 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -164,14 +164,14 @@ away.
If you want to install CFME/MIQ at the same time you install your
OCP/Origin cluster, ensure that `openshift_management_install_management` is set
to `true` in your inventory. Call the standard
-`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ
+`playbooks/deploy_cluster.yml` playbook to begin the cluster and CFME/MIQ
installation.
If you are installing CFME/MIQ on an *already provisioned cluster*
then you can call the CFME/MIQ playbook directly:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/config.yml
```
*Note: Use `miq-template` in the following examples for ManageIQ installs*
@@ -489,7 +489,7 @@ This playbook will:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/add_container_provider.yml
```
## Multiple Providers
@@ -567,7 +567,7 @@ the config file path.
```
$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
- playbooks/byo/openshift-management/add_many_container_providers.yml
+ playbooks/openshift-management/add_many_container_providers.yml
```
Afterwards you will find two new container providers in your
@@ -579,7 +579,7 @@ to see an overview.
This role includes a playbook to uninstall and erase the CFME/MIQ
installation:
-* `playbooks/byo/openshift-management/uninstall.yml`
+* `playbooks/openshift-management/uninstall.yml`
NFS export definitions and data stored on NFS exports are not
automatically removed. You are urged to manually erase any data from
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index e768961ce..b5e234b7f 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -88,7 +88,7 @@ openshift_management_storage_nfs_local_hostname: false
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
openshift_management_username: admin
openshift_management_password: smartvm
diff --git a/roles/openshift_management/meta/main.yml b/roles/openshift_management/meta/main.yml
index 07ad51126..9f19704a8 100644
--- a/roles/openshift_management/meta/main.yml
+++ b/roles/openshift_management/meta/main.yml
@@ -16,3 +16,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index 3bade9e8c..f212dba7c 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -3,7 +3,7 @@
# Users, projects, and privileges
- name: Run pre-install Management validation checks
- include: validate.yml
+ include_tasks: validate.yml
# This creates a service account allowing Container Provider
# integration (managing OCP/Origin via MIQ/Management)
@@ -18,18 +18,18 @@
display_name: "{{ openshift_management_project_description }}"
- name: Create and Authorize Management Accounts
- include: accounts.yml
+ include_tasks: accounts.yml
######################################################################
# STORAGE - Initialize basic storage class
- name: Determine the correct NFS host if required
- include: storage/nfs_server.yml
+ include_tasks: storage/nfs_server.yml
when: openshift_management_storage_class in ['nfs', 'nfs_external']
#---------------------------------------------------------------------
# * nfs - set up NFS shares on the first master for a proof of concept
- name: Create required NFS exports for Management app storage
- include: storage/nfs.yml
+ include_tasks: storage/nfs.yml
when: openshift_management_storage_class == 'nfs'
#---------------------------------------------------------------------
@@ -56,14 +56,14 @@
######################################################################
# APPLICATION TEMPLATE
- name: Install the Management app and PV templates
- include: template.yml
+ include_tasks: template.yml
######################################################################
# APP & DB Storage
# For local/external NFS backed installations
- name: "Create the required App and DB PVs using {{ openshift_management_storage_class }}"
- include: storage/create_nfs_pvs.yml
+ include_tasks: storage/create_nfs_pvs.yml
when:
- openshift_management_storage_class in ['nfs', 'nfs_external']
diff --git a/roles/openshift_management/tasks/storage/storage.yml b/roles/openshift_management/tasks/storage/storage.yml
index d8bf7aa3e..a3675b29b 100644
--- a/roles/openshift_management/tasks/storage/storage.yml
+++ b/roles/openshift_management/tasks/storage/storage.yml
@@ -1,3 +1,3 @@
---
-- include: nfs.yml
+- include_tasks: nfs.yml
when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 359536202..557bfe022 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,24 +1,22 @@
---
- name: restart master api
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when:
- not (master_api_service_status_changed | default(false) | bool)
- - openshift.master.cluster_method == 'native'
notify:
- Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
when:
- not (master_controllers_service_status_changed | default(false) | bool)
- - openshift.master.cluster_method == 'native'
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index a1cda2ad4..bf0cbbf18 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -15,3 +15,4 @@ dependencies:
- role: lib_openshift
- role: lib_utils
- role: lib_os_firewall
+- role: openshift_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index e52cd6231..9be5508aa 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -11,31 +11,12 @@
- openshift_master_oauth_grant_method is defined
- openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
-# HA Variable Validation
-- fail:
- msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
- when:
- - openshift.master.ha | bool
- - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
-- fail:
- msg: "openshift_master_cluster_password must be set for multi-master installations"
- when:
- - openshift.master.ha | bool
- - openshift.master.cluster_method == "pacemaker"
- - openshift_master_cluster_password is not defined or not openshift_master_cluster_password
-- fail:
- msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
- when:
- - openshift.master.ha | bool
- - openshift.master.cluster_method == "pacemaker"
- - openshift.common.is_containerized | bool
-
- name: Open up firewall ports
import_tasks: firewall.yml
- name: Install Master package
package:
- name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- not openshift.common.is_containerized | bool
@@ -160,7 +141,7 @@
# The template file will stomp any other settings made.
- block:
- name: check whether our docker-registry setting exists in the env file
- command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
+ command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master"
failed_when: false
changed_when: false
register: l_already_set
@@ -222,11 +203,10 @@
- name: Start and enable master api on first master
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname == openshift_master_hosts[0]
register: l_start_result
until: not l_start_result | failed
@@ -234,29 +214,26 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
- set_fact:
master_api_service_status_changed: "{{ l_start_result | changed }}"
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname == openshift_master_hosts[0]
- pause:
seconds: 15
when:
- openshift.master.ha | bool
- - openshift.master.cluster_method == 'native'
- name: Start and enable master api all masters
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname != openshift_master_hosts[0]
register: l_start_result
until: not l_start_result | failed
@@ -264,67 +241,39 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
- set_fact:
master_api_service_status_changed: "{{ l_start_result | changed }}"
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname != openshift_master_hosts[0]
# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
- include_tasks: check_master_api_is_ready.yml
when:
- - openshift.master.cluster_method == 'native'
- master_api_service_status_changed | bool
- name: Start and enable master controller service
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
enabled: yes
state: started
- when:
- - openshift.master.cluster_method == 'native'
register: l_start_result
until: not l_start_result | failed
retries: 1
delay: 60
- name: Dump logs from master-controllers if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers
when:
- l_start_result | failed
- name: Set fact master_controllers_service_status_changed
set_fact:
master_controllers_service_status_changed: "{{ l_start_result | changed }}"
- when:
- - openshift.master.cluster_method == 'native'
-
-- name: Install cluster packages
- package: name=pcs state=present
- when:
- - openshift.master.cluster_method == 'pacemaker'
- - not openshift.common.is_containerized | bool
- register: l_install_result
- until: l_install_result | success
-
-- name: Start and enable cluster service
- systemd:
- name: pcsd
- enabled: yes
- state: started
- when:
- - openshift.master.cluster_method == 'pacemaker'
- - not openshift.common.is_containerized | bool
-
-- name: Set the cluster user password
- shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
- when:
- - l_install_result | changed
- name: node bootstrap settings
include_tasks: bootstrap.yml
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index c95f562d0..8b342a5b4 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -32,8 +32,8 @@
when:
- openshift_docker_alternative_creds | default(False) | bool
- oreg_auth_user is defined
- - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: master_oreg_auth_credentials_create
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: master_oreg_auth_credentials_create_alt
notify:
- restart master api
- restart master controllers
@@ -45,4 +45,8 @@
when:
- openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool
+ - >
+ (master_oreg_auth_credentials_stat.stat.exists
+ or oreg_auth_credentials_replace
+ or master_oreg_auth_credentials_create.changed
+ or master_oreg_auth_credentials_create_alt.changed) | bool
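Editor's note: the reworked `when:` above folds one long boolean expression over several lines with a YAML folded scalar (`>`). That matters because a `when:` given as a list is ANDed, so an OR over several registered results must remain a single expression. A minimal standalone sketch of the pattern (variables illustrative):

```
- hosts: localhost
  gather_facts: false
  vars:
    cred_exists: false
    force_replace: true
    cred_created: false
  tasks:
    - name: Fire when any of the OR'd triggers is true
      debug:
        msg: "redeploying registry credentials"
      when: >
        (cred_exists
         or force_replace
         or cred_created) | bool
```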
diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml
index 4f8b758fd..715347101 100644
--- a/roles/openshift_master/tasks/restart.yml
+++ b/roles/openshift_master/tasks/restart.yml
@@ -1,7 +1,7 @@
---
- name: Restart master API
service:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when: openshift_master_ha | bool
- name: Wait for master API to come back online
@@ -14,7 +14,7 @@
when: openshift_master_ha | bool
- name: Restart master controllers
service:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: restarted
# Ignore errrors since it is possible that type != simple for
# pre-3.1.1 installations.
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 23386f11b..f6c5ce0dd 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull master system container image
command: >
@@ -12,12 +8,12 @@
- name: Check Master system container package
command: >
- atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master
+ atomic containers list --no-trunc -a -f container={{ openshift_service_type }}-master
# HA
- name: Install or Update HA api master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
@@ -25,7 +21,7 @@
- name: Install or Update HA controller master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 9d11ed574..76b6f46aa 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -13,7 +13,7 @@
- name: Disable the legacy master service if it exists
systemd:
- name: "{{ openshift.common.service_type }}-master"
+ name: "{{ openshift_service_type }}-master"
state: stopped
enabled: no
masked: yes
@@ -21,11 +21,10 @@
- name: Remove the legacy master service if it exists
file:
- path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service"
+ path: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master.service"
state: absent
ignore_errors: true
when:
- - openshift.master.cluster_method == "native"
- not l_is_master_system_container | bool
# This is the image used for both HA and non-HA clusters:
@@ -41,9 +40,8 @@
- name: Create the ha systemd unit files
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
- dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
+ dest: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master-{{ item }}.service"
when:
- - openshift.master.cluster_method == "native"
- not l_is_master_system_container | bool
with_items:
- api
@@ -57,106 +55,89 @@
- name: enable master services
systemd:
- name: "{{ openshift.common.service_type }}-master-{{ item }}"
+ name: "{{ openshift_service_type }}-master-{{ item }}"
enabled: yes
with_items:
- api
- controllers
when:
- - openshift.master.cluster_method == "native"
- not l_is_master_system_container | bool
- name: Preserve Master API Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-api
register: l_master_api_proxy
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Preserve Master API AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-api
register: master_api_aws
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Create the master api service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
backup: true
- when:
- - openshift.master.cluster_method == "native"
notify:
- restart master api
- name: Restore Master API Proxy Config Options
when:
- - openshift.master.cluster_method == "native"
- l_master_api_proxy.rc == 0
- "'http_proxy' not in openshift.common"
- "'https_proxy' not in openshift.common"
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ l_master_api_proxy.stdout_lines | default([]) }}"
- name: Restore Master API AWS Options
when:
- - openshift.master.cluster_method == "native"
- master_api_aws.rc == 0
- not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ master_api_aws.stdout_lines | default([]) }}"
no_log: True
- name: Preserve Master Controllers Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_proxy
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Preserve Master Controllers AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_aws
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Create the master controllers service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
backup: true
- when:
- - openshift.master.cluster_method == "native"
notify:
- restart master controllers
- name: Restore Master Controllers Proxy Config Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
when:
- - openshift.master.cluster_method == "native"
- master_controllers_proxy.rc == 0
- "'http_proxy' not in openshift.common"
- "'https_proxy' not in openshift.common"
- name: Restore Master Controllers AWS Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
when:
- - openshift.master.cluster_method == "native"
- master_controllers_aws.rc == 0
- not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
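Editor's note: the Preserve/Restore pairs in this file follow a simple idiom, independent of the `cluster_method` conditions being removed: grep the existing sysconfig file for lines of interest before the template stomps it, then re-add those lines with `lineinfile`. A minimal sketch of that lifecycle (service name and template are illustrative):

```
- name: Preserve existing PROXY lines
  command: grep PROXY /etc/sysconfig/example-service
  register: l_proxy_lines
  failed_when: false
  changed_when: false

- name: Rewrite the env file from a template (drops any hand-added PROXY lines)
  template:
    src: example-service.j2
    dest: /etc/sysconfig/example-service
    backup: true

- name: Restore the preserved PROXY lines
  lineinfile:
    dest: /etc/sysconfig/example-service
    line: "{{ item }}"
  with_items: "{{ l_proxy_lines.stdout_lines | default([]) }}"
  when: l_proxy_lines.rc == 0
```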
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index caab3045a..f50b91ff5 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -12,11 +12,11 @@
package: name={{ master_pkgs | join(',') }} state=present
vars:
master_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
register: result
until: result | success
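Editor's note: the upgrade task above builds the whole RPM set from `openshift_service_type` plus a version suffix and hands it to `package:` as one comma-joined transaction. A minimal sketch of how that list renders (values illustrative):

```
- hosts: localhost
  gather_facts: false
  vars:
    openshift_service_type: origin      # or atomic-openshift on enterprise installs
    openshift_pkg_version: "-3.7.0"     # illustrative version suffix
  tasks:
    - name: Show the name= argument the package module receives
      debug:
        msg: "{{ master_pkgs | join(',') }}"
      vars:
        master_pkgs:
          - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}"
          - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
```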
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index cec3d3fb1..5e46d9121 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -3,18 +3,18 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=etcd_container.service
Wants=etcd_container.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
After={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-api
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-api \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+ --name {{ openshift_service_type }}-master-api \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-api \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -24,14 +24,14 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-api
+SyslogIdentifier={{ openshift_service_type }}-master-api
Restart=always
RestartSec=5s
[Install]
WantedBy={{ openshift_docker_service_name }}.service
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index a0248151d..899575f1a 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -1,19 +1,19 @@
[Unit]
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
-Wants={{ openshift.common.service_type }}-master-api.service
-After={{ openshift.common.service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
After={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-controllers
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-controllers \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+ --name {{ openshift_service_type }}-master-controllers \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-controllers \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -23,11 +23,11 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index a0f00e545..f1a76e5f5 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -69,29 +69,13 @@ dnsConfig:
bindNetwork: tcp4
{% endif %}
etcdClientInfo:
- ca: {{ "ca-bundle.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
+ ca: master.etcd-ca.crt
certFile: master.etcd-client.crt
keyFile: master.etcd-client.key
urls:
{% for etcd_url in openshift.master.etcd_urls %}
- {{ etcd_url }}
{% endfor %}
-{% if openshift.master.embedded_etcd | bool %}
-etcdConfig:
- address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }}
- peerAddress: {{ openshift.common.hostname }}:7001
- peerServingInfo:
- bindAddress: {{ openshift.master.bind_addr }}:7001
- certFile: etcd.server.crt
- clientCA: ca-bundle.crt
- keyFile: etcd.server.key
- servingInfo:
- bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }}
- certFile: etcd.server.crt
- clientCA: ca-bundle.crt
- keyFile: etcd.server.key
- storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd
-{% endif %}
etcdStorageConfig:
kubernetesStoragePrefix: kubernetes.io
kubernetesStorageVersion: v1
@@ -120,7 +104,7 @@ kubernetesMasterConfig:
- application/vnd.kubernetes.protobuf
{% endif %}
controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
- masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
+ masterCount: {{ openshift.master.master_count }}
masterIP: {{ openshift.common.ip }}
podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
proxyClientInfo:
@@ -204,7 +188,7 @@ projectConfig:
mcsLabelsPerProject: {{ osm_mcs_labels_per_project }}
uidAllocatorRange: "{{ osm_uid_allocator_range }}"
routingConfig:
- subdomain: "{{ openshift_master_default_subdomain | default("") }}"
+ subdomain: "{{ openshift_master_default_subdomain }}"
serviceAccountConfig:
limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }}
managedNames:
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 02bfd6f62..ed8a47df8 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -3,12 +3,12 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=network-online.target
After=etcd.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
@@ -20,4 +20,4 @@ RestartSec=5s
[Install]
WantedBy=multi-user.target
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index fae021845..b36963f73 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -2,19 +2,19 @@
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
After=network-online.target
-After={{ openshift.common.service_type }}-master-api.service
-Wants={{ openshift.common.service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md
deleted file mode 100644
index 58dd19ac3..000000000
--- a/roles/openshift_master_cluster/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Master Cluster
-========================
-
-TODO
-
-Requirements
-------------
-
-* Ansible 2.2
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml
deleted file mode 100644
index c452b165e..000000000
--- a/roles/openshift_master_cluster/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description:
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies: []
diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml
deleted file mode 100644
index 1b94598dd..000000000
--- a/roles/openshift_master_cluster/tasks/configure.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- fail:
- msg: This role requires that openshift_master_cluster_vip is set
- when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip
-- fail:
- msg: This role requires that openshift_master_cluster_public_vip is set
- when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip
-
-- name: Authenticate to the cluster
- command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }}
-
-- name: Create the cluster
- command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }}
-
-- name: Start the cluster
- command: pcs cluster start --all
-
-- name: Enable the cluster on all nodes
- command: pcs cluster enable --all
-
-- name: Set default resource stickiness
- command: pcs resource defaults resource-stickiness=100
-
-- name: Add the cluster VIP resource
- command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master
-
-- name: Add the cluster public VIP resource
- command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master
- when: openshift_master_cluster_public_vip != openshift_master_cluster_vip
-
-- name: Add the cluster master service resource
- command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master
-
-- name: Disable stonith
- command: pcs property set stonith-enabled=false
-
-- name: Wait for the clustered master service to be available
- wait_for:
- host: "{{ openshift_master_cluster_vip }}"
- port: "{{ openshift.master.api_port }}"
- state: started
- timeout: 180
- delay: 90
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
deleted file mode 100644
index 41bfc72cb..000000000
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- fail:
- msg: "Not possible on atomic hosts for now"
- when: openshift.common.is_containerized | bool
-
-- name: Test if cluster is already configured
- command: pcs status
- register: pcs_status
- changed_when: false
- failed_when: false
- when: openshift.master.cluster_method == "pacemaker"
-
-- include_tasks: configure.yml
- when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml
index d0dcdae4b..a89f48afa 100644
--- a/roles/openshift_master_facts/defaults/main.yml
+++ b/roles/openshift_master_facts/defaults/main.yml
@@ -1,5 +1,4 @@
---
-openshift_master_default_subdomain: "router.default.svc.cluster.local"
openshift_master_admission_plugin_config:
openshift.io/ImagePolicy:
configuration:
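
With the hard-coded `router.default.svc.cluster.local` default removed above, `openshift_master_default_subdomain` has to be defined elsewhere, for example in the inventory. A minimal sketch of such an inventory entry; the domain value is a placeholder, not taken from this patch:

```yaml
# group_vars/OSEv3.yml — illustrative only; the domain is a placeholder.
openshift_master_default_subdomain: "apps.example.com"
```
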
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index c827f2d26..ff15f693b 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -485,31 +485,6 @@ class FilterModule(object):
Dumper=AnsibleDumper))
@staticmethod
- def validate_pcs_cluster(data, masters=None):
- ''' Validates output from "pcs status", ensuring that each master
- provided is online.
- Ex: data = ('...',
- 'PCSD Status:',
- 'master1.example.com: Online',
- 'master2.example.com: Online',
- 'master3.example.com: Online',
- '...')
- masters = ['master1.example.com',
- 'master2.example.com',
- 'master3.example.com']
- returns True
- '''
- if not issubclass(type(data), string_types):
- raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
- if not issubclass(type(masters), list):
- raise errors.AnsibleFilterError("|failed expects masters is a list")
- valid = True
- for master in masters:
- if "{0}: Online".format(master) not in data:
- valid = False
- return valid
-
- @staticmethod
def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
''' Return certificates to synchronize based on facts. '''
if not issubclass(type(hostvars), dict):
@@ -553,6 +528,5 @@ class FilterModule(object):
def filters(self):
''' returns a mapping of filters to methods '''
return {"translate_idps": self.translate_idps,
- "validate_pcs_cluster": self.validate_pcs_cluster,
"certificates_to_synchronize": self.certificates_to_synchronize,
"oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 20cc5358e..418dcba67 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -1,14 +1,8 @@
---
-# Ensure the default sub-domain is set:
-- name: Migrate legacy osm_default_subdomain fact
- set_fact:
- openshift_master_default_subdomain: "{{ osm_default_subdomain | default(None) }}"
- when: openshift_master_default_subdomain is not defined
-
- name: Verify required variables are set
fail:
msg: openshift_master_default_subdomain must be set to deploy metrics
- when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain | default("") == ""
+ when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain == ""
# NOTE: These metrics variables are unfortunately needed by both the master and the metrics roles
# to properly configure the master-config.yaml file.
@@ -20,7 +14,7 @@
- name: Set g_metrics_hostname
set_fact:
g_metrics_hostname: "{{ openshift_hosted_metrics_public_url
- | default('hawkular-metrics.' ~ (openshift_master_default_subdomain))
+ | default('hawkular-metrics.' ~ openshift_master_default_subdomain)
| oo_hostname_from_url }}"
- set_fact:
@@ -31,7 +25,6 @@
openshift_facts:
role: master
local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
@@ -52,7 +45,6 @@
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}"
- embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index 074b72942..1f4b5a116 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -1,17 +1,17 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
+ when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ when: (not (master_controllers_service_status_changed | default(false) | bool))
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml
index edb854467..1aa7e7079 100644
--- a/roles/openshift_nfs/tasks/setup.yml
+++ b/roles/openshift_nfs/tasks/setup.yml
@@ -1,7 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install nfs-utils
package: name=nfs-utils state=present
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 67f697924..87ceb8103 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -33,9 +33,9 @@ Notes
Currently we support re-labeling nodes, but we do not re-schedule running pods or remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, follow the steps below:
```
-oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --drain ${NODE}
-oadm manage-node --schedulable=true ${NODE}
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
```
> If you are using a version earlier than 1.5/3.5, you must replace `--drain` with `--evacuate`.
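
The same three steps can also be driven from a small Ansible play instead of running `oc adm` by hand. This is only a sketch, assuming `oc` is available on the first master and the node name is supplied as a variable; it is not part of the role:

```yaml
# Hypothetical wrapper play for the manual steps above; node_to_drain is a placeholder.
- hosts: masters[0]
  gather_facts: false
  vars:
    node_to_drain: node1.example.com
  tasks:
    - name: Mark the node unschedulable
      command: oc adm manage-node --schedulable=false {{ node_to_drain }}

    - name: Drain the running pods
      command: oc adm manage-node --drain {{ node_to_drain }}

    - name: Mark the node schedulable again
      command: oc adm manage-node --schedulable=true {{ node_to_drain }}
```
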
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index f3867fe4a..fff927944 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -14,7 +14,11 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
-openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
system_images_registry_dict:
openshift-enterprise: "registry.access.redhat.com"
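
The new `openshift_service_type_dict` lookup replaces the inline ternary, so an unsupported `openshift_deployment_type` now fails with an undefined-key error instead of silently mapping to `atomic-openshift`. A standalone sketch of how the lookup resolves; the play itself is illustrative and not part of the role:

```yaml
# Minimal illustration of the dict-based lookup introduced above.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_deployment_type: openshift-enterprise
    openshift_service_type_dict:
      origin: origin
      openshift-enterprise: atomic-openshift
  tasks:
    - debug:
        msg: "{{ openshift_service_type_dict[openshift_deployment_type] }}"  # -> atomic-openshift
```
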
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 229c6bbed..170a3dc6e 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -34,7 +34,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_openshift_node_restart_node_result
until: not l_openshift_node_restart_node_result | failed
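
The handler registers the restart result and re-runs until it no longer reports failure. The same retry shape in isolation, as a hedged sketch; the retry count and delay below are placeholders, not values from this patch:

```yaml
# Sketch of a restart handler that retries until the unit comes back.
- name: restart node
  systemd:
    name: "{{ openshift_service_type }}-node"
    state: restarted
  register: l_openshift_node_restart_node_result
  until: not l_openshift_node_restart_node_result | failed
  retries: 3
  delay: 30
```
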
diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml
index 38c2b794d..a7f1fc116 100644
--- a/roles/openshift_node/tasks/aws.yml
+++ b/roles/openshift_node/tasks/aws.yml
@@ -1,7 +1,7 @@
---
- name: Configure AWS Cloud Provider Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 741a2234f..e5c80bd09 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -38,7 +38,7 @@
- name: Configure Node Environment Variables
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
create: true
@@ -76,7 +76,7 @@
- name: Start and enable node dep
systemd:
daemon_reload: yes
- name: "{{ openshift.common.service_type }}-node-dep"
+ name: "{{ openshift_service_type }}-node-dep"
enabled: yes
state: started
@@ -84,7 +84,7 @@
block:
- name: Start and enable node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
enabled: yes
state: started
daemon_reload: yes
@@ -95,7 +95,7 @@
ignore_errors: true
- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-node
when: node_start_result | failed
- name: Abort if node failed to start
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index 527580481..ebc1426d3 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Node settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/configure-proxy-settings.yml b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
index d60794305..7ddd319d2 100644
--- a/roles/openshift_node/tasks/config/configure-proxy-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Proxy Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
index ee91a88ab..9f1145d12 100644
--- a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node dependencies docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node-dep.service"
src: openshift.docker.node.dep.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
index f92ff79b5..649fc5f6b 100644
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: openshift.docker.node.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
index d743d2188..bbe9c71f5 100644
--- a/roles/openshift_node/tasks/docker/upgrade.yml
+++ b/roles/openshift_node/tasks/docker/upgrade.yml
@@ -1,8 +1,7 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - openshift.common.is_containerized
-# - docker_upgrade_nuke_images
# - docker_version
# - skip_docker_restart
@@ -12,20 +11,6 @@
- debug: var=docker_image_count.stdout
-# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition
-- name: Remove all containers and images
- script: nuke_images.sh
- register: nuke_images_result
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
- service:
name: docker
state: stopped
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 1ed4a05c1..f93aed246 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -3,14 +3,14 @@
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
register: result
until: result | success
- name: Install sdn-ovs package
package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- openshift_node_use_openshift_sdn | bool
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e60d96760..32c5f495f 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -6,7 +6,7 @@
- deployment_type == 'openshift-enterprise'
- not openshift_use_crio
-- include: dnsmasq.yml
+- include_tasks: dnsmasq.yml
- name: setup firewall
import_tasks: firewall.yml
@@ -50,6 +50,8 @@
enabled: yes
state: restarted
when: openshift_use_crio
+ register: task_result
+ failed_when: task_result|failed and 'could not find the requested service' not in task_result.msg|lower
- name: restart NetworkManager to ensure resolv.conf is present
systemd:
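
The `register`/`failed_when` pair added above lets the restart succeed on hosts where the cri-o unit does not exist. The same pattern in isolation, as a sketch; the service name and error message mirror the task above, the surrounding wording is illustrative:

```yaml
# Restart a unit but tolerate hosts where it is not installed.
- name: Restart cri-o if present
  systemd:
    name: cri-o
    enabled: yes
    state: restarted
  register: task_result
  failed_when: task_result|failed and 'could not find the requested service' not in task_result.msg|lower
```
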
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index eb8d9a6a5..98978ec6f 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull node system container image
command: >
@@ -12,10 +8,10 @@
- name: Install or Update node system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- - "MASTER_SERVICE={{ openshift.common.service_type }}.service"
+ - "MASTER_SERVICE={{ openshift_service_type }}.service"
state: latest
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index d33e172c1..b61bc84c1 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -7,11 +7,6 @@
l_service_name: "{{ openshift_docker_service_name }}"
when: not openshift_use_crio
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index f5428867a..ab43ec049 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -32,7 +32,7 @@
- openshift_docker_alternative_creds | bool
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: node_oreg_auth_credentials_create
+ register: node_oreg_auth_credentials_create_alt
notify:
- restart node
@@ -43,4 +43,8 @@
when:
- openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
+ - >
+ (node_oreg_auth_credentials_stat.stat.exists
+ or oreg_auth_credentials_replace
+ or node_oreg_auth_credentials_create.changed
+ or node_oreg_auth_credentials_create_alt.changed) | bool
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 397e1ba18..c532147b1 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,7 +1,7 @@
---
- name: Install Node service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
when: not l_is_node_system_container | bool
notify:
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 561b56918..9f333645a 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -17,7 +17,7 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
@@ -26,8 +26,8 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-master-api"
- etcd_container
failed_when: false
when: openshift.common.is_containerized | bool
@@ -80,9 +80,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -91,7 +91,7 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
when: not openshift.common.is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 3f1abceab..65c301783 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -1,6 +1,6 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - openshift.common.is_containerized
# - openshift.common.hostname
# - openshift.master.api_port
@@ -27,9 +27,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
- name: Wait for master API to come back online
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index fcbe1a598..120b93bc3 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -1,13 +1,13 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - component
# - openshift_pkg_version
# - openshift.common.is_atomic
# We verified latest rpm available is suitable, so just yum update.
- name: Upgrade packages
- package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+ package: "name={{ openshift_service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
register: result
until: result | success
@@ -19,7 +19,7 @@
- name: Install Node service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "node.service.j2"
register: l_node_unit
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 5964ac095..8b43beb07 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,11 +1,11 @@
[Unit]
Requires={{ openshift_docker_service_name }}.service
After={{ openshift_docker_service_name }}.service
-PartOf={{ openshift.common.service_type }}-node.service
-Before={{ openshift.common.service_type }}-node.service
+PartOf={{ openshift_service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi"
ExecStop=
-SyslogIdentifier={{ openshift.common.service_type }}-node-dep
+SyslogIdentifier={{ openshift_service_type }}-node-dep
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 3b33ca542..b174c7023 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,5 +1,5 @@
[Unit]
-After={{ openshift.common.service_type }}-master.service
+After={{ openshift_service_type }}-master.service
After={{ openshift_docker_service_name }}.service
After=openvswitch.service
PartOf={{ openshift_docker_service_name }}.service
@@ -10,20 +10,20 @@ PartOf=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
-Wants={{ openshift.common.service_type }}-master.service
-Requires={{ openshift.common.service_type }}-node-dep.service
-After={{ openshift.common.service_type }}-node-dep.service
+Wants={{ openshift_service_type }}-master.service
+Requires={{ openshift_service_type }}-node-dep.service
+After={{ openshift_service_type }}-node-dep.service
Requires=dnsmasq.service
After=dnsmasq.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node-dep
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
- --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift_service_type }}-node \
-v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
-e HOST=/rootfs -e HOST_ETC=/host-etc \
-v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \
@@ -40,10 +40,10 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
{% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-SyslogIdentifier={{ openshift.common.service_type }}-node
+SyslogIdentifier={{ openshift_service_type }}-node
Restart=always
RestartSec=5s
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 5f182e0d6..65a647b8f 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -4,11 +4,9 @@ openshift_openstack_stack_state: 'present'
openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0
openshift_openstack_node_ingress_cidr: 0.0.0.0/0
openshift_openstack_lb_ingress_cidr: 0.0.0.0/0
-openshift_openstack_bastion_ingress_cidr: 0.0.0.0/0
openshift_openstack_num_etcd: 0
openshift_openstack_num_masters: 1
openshift_openstack_num_nodes: 1
-openshift_openstack_num_dns: 0
openshift_openstack_num_infra: 1
openshift_openstack_dns_nameservers: []
openshift_openstack_nodes_to_remove: []
@@ -45,8 +43,10 @@ openshift_openstack_container_storage_setup:
# populate-dns
openshift_openstack_dns_records_add: []
-openshift_openstack_external_nsupdate_keys: {}
+openshift_openstack_public_hostname_suffix: ""
+openshift_openstack_private_hostname_suffix: ""
+openshift_openstack_public_dns_domain: "example.com"
openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' + openshift_openstack_public_dns_domain) }}"
openshift_openstack_app_subdomain: "apps"
@@ -60,20 +60,17 @@ openshift_openstack_infra_hostname: infra-node
openshift_openstack_node_hostname: app-node
openshift_openstack_lb_hostname: lb
openshift_openstack_etcd_hostname: etcd
-openshift_openstack_dns_hostname: dns
openshift_openstack_keypair_name: openshift
openshift_openstack_lb_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}"
-openshift_openstack_dns_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}"
-openshift_openstack_dns_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_provider_network_name: null
openshift_openstack_external_network_name: null
openshift_openstack_private_network: >-
@@ -89,8 +86,5 @@ openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_si
openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_etcd_volume_size: 2
-openshift_openstack_dns_volume_size: 1
openshift_openstack_lb_volume_size: 5
-openshift_openstack_use_bastion: false
-openshift_openstack_ui_ssh_tunnel: false
openshift_openstack_ephemeral_volumes: false
diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml
index 57c7238d1..30996cc47 100644
--- a/roles/openshift_openstack/tasks/check-prerequisites.yml
+++ b/roles/openshift_openstack/tasks/check-prerequisites.yml
@@ -32,10 +32,12 @@
command: python -c "import dns"
ignore_errors: yes
register: pythondns_result
+ when: openshift_openstack_external_nsupdate_keys is defined
- name: Check if python-dns is installed
assert:
that: 'pythondns_result.rc == 0'
msg: "Python module python-dns is not installed"
+ when: openshift_openstack_external_nsupdate_keys is defined
# Check jinja2
- name: Try to import jinja2 module
@@ -85,21 +87,19 @@
msg: "Keypair {{ openshift_openstack_keypair_name }} is not available"
# Check that custom images are available
-- include: custom_image_check.yaml
+- include_tasks: custom_image_check.yaml
with_items:
- "{{ openshift_openstack_master_image }}"
- "{{ openshift_openstack_infra_image }}"
- "{{ openshift_openstack_node_image }}"
- "{{ openshift_openstack_lb_image }}"
- "{{ openshift_openstack_etcd_image }}"
- - "{{ openshift_openstack_dns_image }}"
# Check that custom flavors are available
-- include: custom_flavor_check.yaml
+- include_tasks: custom_flavor_check.yaml
with_items:
- "{{ openshift_openstack_master_flavor }}"
- "{{ openshift_openstack_infra_flavor }}"
- "{{ openshift_openstack_node_flavor }}"
- "{{ openshift_openstack_lb_flavor }}"
- "{{ openshift_openstack_etcd_flavor }}"
- - "{{ openshift_openstack_dns_flavor }}"
diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml
deleted file mode 100644
index e1a18425f..000000000
--- a/roles/openshift_openstack/tasks/hostname.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Setting Hostname Fact
- set_fact:
- new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}"
-
-- name: Setting FQDN Fact
- set_fact:
- new_fqdn: "{{ new_hostname }}.{{ openshift_openstack_full_dns_domain }}"
-
-- name: Setting hostname and DNS domain
- hostname: name="{{ new_fqdn }}"
-
-- name: Check for cloud.cfg
- stat: path=/etc/cloud/cloud.cfg
- register: cloud_cfg
-
-- name: Prevent cloud-init updates of hostname/fqdn (if applicable)
- lineinfile:
- dest: /etc/cloud/cloud.cfg
- state: present
- regexp: "{{ item.regexp }}"
- line: "{{ item.line }}"
- with_items:
- - { regexp: '^ - set_hostname', line: '# - set_hostname' }
- - { regexp: '^ - update_hostname', line: '# - update_hostname' }
- when: cloud_cfg.stat.exists == True
diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml
index 89e58d830..59df2e396 100644
--- a/roles/openshift_openstack/tasks/node-configuration.yml
+++ b/roles/openshift_openstack/tasks/node-configuration.yml
@@ -4,8 +4,6 @@
msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'"
when: ansible_selinux.config_mode != "enforcing"
-- include: hostname.yml
+- include_tasks: container-storage-setup.yml
-- include: container-storage-setup.yml
-
-- include: node-network.yml
+- include_tasks: node-network.yml
diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml
index c03aceb94..cf2ead5c3 100644
--- a/roles/openshift_openstack/tasks/populate-dns.yml
+++ b/roles/openshift_openstack/tasks/populate-dns.yml
@@ -1,7 +1,7 @@
---
- name: "Generate list of private A records"
set_fact:
- private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}"
+ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix, 'ip': hostvars[item]['private_v4'] } ] }}"
with_items: "{{ groups['cluster_hosts'] }}"
- name: "Add wildcard records to the private A records for infrahosts"
@@ -30,7 +30,6 @@
nsupdate_key_algorithm_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] }}"
nsupdate_private_key_name: "{{ openshift_openstack_external_nsupdate_keys['private']['key_name']|default('private-' + openshift_openstack_full_dns_domain) }}"
when:
- - openshift_openstack_external_nsupdate_keys is defined
- openshift_openstack_external_nsupdate_keys['private'] is defined
@@ -44,10 +43,12 @@
key_secret: "{{ nsupdate_key_secret_private }}"
key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}"
entries: "{{ private_records }}"
+ when:
+ - openshift_openstack_external_nsupdate_keys['private'] is defined
- name: "Generate list of public A records"
set_fact:
- public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}"
+ public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix, 'ip': hostvars[item]['public_v4'] } ] }}"
with_items: "{{ groups['cluster_hosts'] }}"
when: hostvars[item]['public_v4'] is defined
@@ -63,15 +64,6 @@
when:
- hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
- openshift_openstack_num_masters == 1
- - not openshift_openstack_use_bastion|bool
-
-- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)"
- set_fact:
- public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}"
- when:
- - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
- - openshift_openstack_num_masters == 1
- - openshift_openstack_use_bastion|bool
- name: "Add public master cluster hostname records to the public A records (multi-master)"
set_fact:
@@ -87,7 +79,6 @@
nsupdate_key_algorithm_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] }}"
nsupdate_public_key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name']|default('public-' + openshift_openstack_full_dns_domain) }}"
when:
- - openshift_openstack_external_nsupdate_keys is defined
- openshift_openstack_external_nsupdate_keys['public'] is defined
- name: "Generate the public Add section for DNS"
@@ -100,11 +91,13 @@
key_secret: "{{ nsupdate_key_secret_public }}"
key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}"
entries: "{{ public_records }}"
+ when:
+ - openshift_openstack_external_nsupdate_keys['public'] is defined
- name: "Generate the final openshift_openstack_dns_records_add"
set_fact:
- openshift_openstack_dns_records_add: "{{ private_named_records + public_named_records }}"
+ openshift_openstack_dns_records_add: "{{ private_named_records|default([]) + public_named_records|default([]) }}"
- name: "Add DNS A records"
@@ -120,7 +113,7 @@
# TODO(shadower): add a cleanup playbook that removes these records, too!
state: present
with_subelements:
- - "{{ openshift_openstack_dns_records_add | default({}) }}"
+ - "{{ openshift_openstack_dns_records_add | default([]) }}"
- entries
register: nsupdate_add_result
until: nsupdate_add_result|succeeded
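
To make the new hostname suffixes concrete, here is a standalone sketch of the private-record construction; the suffix, hostname, and address below are invented for illustration:

```yaml
# Illustration of how one private A record entry is assembled with the suffix.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_openstack_private_hostname_suffix: "-int"   # placeholder
    sample_hostname: app-node-0
    sample_ip: 192.0.2.10
  tasks:
    - set_fact:
        private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': sample_hostname + openshift_openstack_private_hostname_suffix, 'ip': sample_ip } ] }}"
    - debug:
        var: private_records   # -> hostname: app-node-0-int, ip: 192.0.2.10
```
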
diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml
index dccbe334c..b774bd620 100644
--- a/roles/openshift_openstack/tasks/provision.yml
+++ b/roles/openshift_openstack/tasks/provision.yml
@@ -1,6 +1,6 @@
---
- name: Generate the templates
- include: generate-templates.yml
+ include_tasks: generate-templates.yml
when:
- openshift_openstack_stack_state == 'present'
@@ -17,7 +17,7 @@
meta: refresh_inventory
- name: CleanUp
- include: cleanup.yml
+ include_tasks: cleanup.yml
when:
- openshift_openstack_stack_state == 'present'
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 0e7538629..8d13eb81e 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -54,25 +54,8 @@ outputs:
description: Floating IPs of the nodes
value: { get_attr: [ infra_nodes, floating_ip ] }
-{% if openshift_openstack_num_dns|int > 0 %}
- dns_name:
- description: Name of the DNS
- value:
- get_attr:
- - dns
- - name
-
- dns_floating_ips:
- description: Floating IPs of the DNS
- value: { get_attr: [ dns, floating_ip ] }
-
- dns_private_ips:
- description: Private IPs of the DNS
- value: { get_attr: [ dns, private_ip ] }
-{% endif %}
-
conditions:
- no_floating: {% if openshift_openstack_provider_network_name or openshift_openstack_use_bastion|bool %}true{% else %}false{% endif %}
+ no_floating: {% if openshift_openstack_provider_network_name %}true{% else %}false{% endif %}
resources:
@@ -180,13 +163,6 @@ resources:
port_range_min: 22
port_range_max: 22
remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
-{% if openshift_openstack_use_bastion|bool %}
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: {{ openshift_openstack_bastion_ingress_cidr }}
-{% endif %}
- direction: ingress
protocol: icmp
remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
@@ -443,44 +419,7 @@ resources:
port_range_min: 443
port_range_max: 443
-{% if openshift_openstack_num_dns|int > 0 %}
- dns-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-dns-secgrp
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
- description:
- str_replace:
- template: Security group for cluster_id cluster DNS
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
- rules:
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }}
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }}
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
-{% endif %}
-
-{% if openshift_openstack_num_masters|int > 1 or openshift_openstack_ui_ssh_tunnel|bool %}
+{% if openshift_openstack_num_masters|int > 1 %}
lb-secgrp:
type: OS::Neutron::SecurityGroup
properties:
@@ -491,20 +430,13 @@ resources:
protocol: tcp
port_range_min: {{ openshift_master_api_port | default(8443) }}
port_range_max: {{ openshift_master_api_port | default(8443) }}
- remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }}
-{% if openshift_openstack_ui_ssh_tunnel|bool %}
- - direction: ingress
- protocol: tcp
- port_range_min: {{ openshift_master_api_port | default(8443) }}
- port_range_max: {{ openshift_master_api_port | default(8443) }}
- remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
-{% endif %}
+ remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %}
- direction: ingress
protocol: tcp
port_range_min: {{ openshift_master_console_port | default(8443) }}
port_range_max: {{ openshift_master_console_port | default(8443) }}
- remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }}
+ remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
{% endif %}
{% endif %}
@@ -553,7 +485,7 @@ resources:
- no_floating
- null
- {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_etcd_volume_size }}
@@ -685,7 +617,7 @@ resources:
- no_floating
- null
- {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_master_volume_size }}
@@ -755,7 +687,7 @@ resources:
- no_floating
- null
- {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_node_volume_size }}
@@ -818,9 +750,6 @@ resources:
{% else %}
- { get_resource: node-secgrp }
{% endif %}
-{% if openshift_openstack_ui_ssh_tunnel|bool and openshift_openstack_num_masters|int < 2 %}
- - { get_resource: lb-secgrp }
-{% endif %}
- { get_resource: infra-secgrp }
- { get_resource: common-secgrp }
{% if not openshift_openstack_provider_network_name %}
@@ -835,54 +764,3 @@ resources:
depends_on:
- interface
{% endif %}
-
-{% if openshift_openstack_num_dns|int > 0 %}
- dns:
- type: OS::Heat::ResourceGroup
- properties:
- count: {{ openshift_openstack_num_dns }}
- resource_def:
- type: server.yaml
- properties:
- name:
- str_replace:
- template: k8s_type-%index%.cluster_id
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
- k8s_type: {{ openshift_openstack_dns_hostname }}
- cluster_env: {{ openshift_openstack_public_dns_domain }}
- cluster_id: {{ openshift_openstack_stack_name }}
- group:
- str_replace:
- template: k8s_type.cluster_id
- params:
- k8s_type: dns
- cluster_id: {{ openshift_openstack_stack_name }}
- type: dns
- image: {{ openshift_openstack_dns_image }}
- flavor: {{ openshift_openstack_dns_flavor }}
- key_name: {{ openshift_openstack_keypair_name }}
-{% if openshift_openstack_provider_network_name %}
- net: {{ openshift_openstack_provider_network_name }}
- net_name: {{ openshift_openstack_provider_network_name }}
-{% else %}
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
-{% endif %}
- secgrp:
- - { get_resource: dns-secgrp }
- - { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
-{% endif %}
- volume_size: {{ openshift_openstack_dns_volume_size }}
-{% if not openshift_openstack_provider_network_name %}
- depends_on:
- - interface
-{% endif %}
-{% endif %}
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
new file mode 100644
index 000000000..eb13a58ba
--- /dev/null
+++ b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
@@ -0,0 +1,157 @@
+"""
+Ansible action plugin to generate lists of pv and pvc dictionaries
+"""
+
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+
+class ActionModule(ActionBase):
+    """Action plugin to generate persistent volume and claim definitions."""
+
+ def get_templated(self, var_to_template):
+ """Return a properly templated ansible variable"""
+ return self._templar.template(self.task_vars.get(var_to_template))
+
+ def build_common(self, varname=None):
+ """Retrieve common variables for each pv and pvc type"""
+ volume = self.get_templated(str(varname) + '_volume_name')
+ size = self.get_templated(str(varname) + '_volume_size')
+ labels = self.task_vars.get(str(varname) + '_labels')
+ if labels:
+ labels = self._templar.template(labels)
+ else:
+ labels = dict()
+ access_modes = self.get_templated(str(varname) + '_access_modes')
+ return (volume, size, labels, access_modes)
+
+ def build_pv_nfs(self, varname=None):
+ """Build pv dictionary for nfs storage type"""
+ host = self.task_vars.get(str(varname) + '_host')
+ if host:
+            host = self._templar.template(host)
+ elif host is None:
+ groups = self.task_vars.get('groups')
+ default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group')
+ if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0:
+ host = groups['oo_nfs_to_config'][0]
+ else:
+ raise errors.AnsibleModuleError("|failed no storage host detected")
+ volume, size, labels, access_modes = self.build_common(varname=varname)
+ directory = self.get_templated(str(varname) + '_nfs_directory')
+ path = directory + '/' + volume
+ return dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+
+ def build_pv_openstack(self, varname=None):
+ """Build pv dictionary for openstack storage type"""
+ volume, size, labels, access_modes = self.build_common(varname=varname)
+ filesystem = self.get_templated(str(varname) + '_openstack_filesystem')
+ volume_id = self.get_templated(str(varname) + '_openstack_volumeID')
+ return dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+
+ def build_pv_glusterfs(self, varname=None):
+ """Build pv dictionary for glusterfs storage type"""
+ volume, size, labels, access_modes = self.build_common(varname=varname)
+ endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints')
+ path = self.get_templated(str(varname) + '_glusterfs_path')
+ read_only = self.get_templated(str(varname) + '_glusterfs_readOnly')
+ return dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+
+ def build_pv_dict(self, varname=None):
+ """Check for the existence of PV variables"""
+ kind = self.task_vars.get(str(varname) + '_kind')
+ if kind:
+ kind = self._templar.template(kind)
+ create_pv = self.task_vars.get(str(varname) + '_create_pv')
+ if create_pv and self._templar.template(create_pv):
+ if kind == 'nfs':
+ return self.build_pv_nfs(varname=varname)
+
+ elif kind == 'openstack':
+ return self.build_pv_openstack(varname=varname)
+
+ elif kind == 'glusterfs':
+ return self.build_pv_glusterfs(varname=varname)
+
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ varname)
+ raise errors.AnsibleModuleError(msg)
+ return None
+
+ def build_pvc_dict(self, varname=None):
+ """Check for the existence of PVC variables"""
+ kind = self.task_vars.get(str(varname) + '_kind')
+ if kind:
+ kind = self._templar.template(kind)
+ create_pv = self.task_vars.get(str(varname) + '_create_pv')
+ if create_pv:
+ create_pv = self._templar.template(create_pv)
+ create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
+ if create_pvc:
+ create_pvc = self._templar.template(create_pvc)
+ if kind != 'object' and create_pv and create_pvc:
+ volume, size, _, access_modes = self.build_common(varname=varname)
+ return dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ return None
+
+ def run(self, tmp=None, task_vars=None):
+ """Run generate_pv_pvcs_list action plugin"""
+ result = super(ActionModule, self).run(tmp, task_vars)
+        # Ignore setting self.task_vars outside of init.
+ # pylint: disable=W0201
+ self.task_vars = task_vars or {}
+
+ result["changed"] = False
+ result["failed"] = False
+ result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
+ vars_to_check = ['openshift_hosted_registry_storage',
+ 'openshift_hosted_router_storage',
+ 'openshift_hosted_etcd_storage',
+ 'openshift_logging_storage',
+ 'openshift_loggingops_storage',
+ 'openshift_metrics_storage',
+ 'openshift_prometheus_storage',
+ 'openshift_prometheus_alertmanager_storage',
+ 'openshift_prometheus_alertbuffer_storage']
+ persistent_volumes = []
+ persistent_volume_claims = []
+ for varname in vars_to_check:
+ pv_dict = self.build_pv_dict(varname)
+ if pv_dict:
+ persistent_volumes.append(pv_dict)
+ pvc_dict = self.build_pvc_dict(varname)
+ if pvc_dict:
+ persistent_volume_claims.append(pvc_dict)
+ result["persistent_volumes"] = persistent_volumes
+ result["persistent_volume_claims"] = persistent_volume_claims
+ return result
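
A minimal usage sketch for the new action plugin, assuming it is resolved from the role's `action_plugins` directory; the role's own call site appears in `tasks/main.yml` further below:

```yaml
# Hypothetical call site; registers the generated lists for later templating.
- name: Build pv and pvc lists from the openshift_*_storage variables
  generate_pv_pvcs_list: {}
  register: l_pv_pvcs_list

- debug:
    var: l_pv_pvcs_list.persistent_volumes

- debug:
    var: l_pv_pvcs_list.persistent_volume_claims
```
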
diff --git a/roles/openshift_persistent_volumes/defaults/main.yml b/roles/openshift_persistent_volumes/defaults/main.yml
new file mode 100644
index 000000000..b16e164e6
--- /dev/null
+++ b/roles/openshift_persistent_volumes/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+
+openshift_persistent_volumes_default_nfs_group: 'oo_nfs_to_config'
+
+openshift_persistent_volume_extras: []
+openshift_persistent_volume_claims_extras: []
+
+glusterfs_pv: []
+glusterfs_pvc: []
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 19e9a56b7..48b0699ab 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -9,4 +9,5 @@ galaxy_info:
- name: EL
versions:
- 7
-dependencies: {}
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index e431e978c..0b4dd7d1f 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -9,39 +9,36 @@
cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
-- name: Deploy PersistentVolume definitions
- template:
- dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
- src: persistent-volume.yml.j2
- when: persistent_volumes | length > 0
- changed_when: False
+- set_fact:
+ glusterfs_pv:
+ - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-volume"
+ capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+ access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+ storage:
+ glusterfs:
+ endpoints: "{{ openshift_hosted_registry_storage_glusterfs_endpoints }}"
+ path: "{{ openshift_hosted_registry_storage_glusterfs_path }}"
+ readOnly: "{{ openshift_hosted_registry_storage_glusterfs_readOnly }}"
+ glusterfs_pvc:
+ - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
+ capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+ access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+ when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
-- name: Create PersistentVolumes
- command: >
- {{ openshift.common.client_binary }} create
- -f {{ mktemp.stdout }}/persistent-volumes.yml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- register: pv_create_output
- when: persistent_volumes | length > 0
- failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
- changed_when: ('created' in pv_create_output.stdout)
+- name: create standard pv and pvc lists
+ # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+ generate_pv_pvcs_list: {}
+ register: l_pv_pvcs_list
-- name: Deploy PersistentVolumeClaim definitions
- template:
- dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
- src: persistent-volume-claim.yml.j2
- when: persistent_volume_claims | length > 0
- changed_when: False
+- include_tasks: pv.yml
+ vars:
+ l_extra_persistent_volumes: "{{ openshift_persistent_volume_extras | union(glusterfs_pv) }}"
+ persistent_volumes: "{{ l_pv_pvcs_list.persistent_volumes | union(l_extra_persistent_volumes) }}"
-- name: Create PersistentVolumeClaims
- command: >
- {{ openshift.common.client_binary }} create
- -f {{ mktemp.stdout }}/persistent-volume-claims.yml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- register: pvc_create_output
- when: persistent_volume_claims | length > 0
- failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
- changed_when: ('created' in pvc_create_output.stdout)
+- include_tasks: pvc.yml
+ vars:
+ l_extra_persistent_volume_claims: "{{ openshift_persistent_volume_claims_extras | union(glusterfs_pvc) }}"
+ persistent_volume_claims: "{{ l_pv_pvcs_list.persistent_volume_claims | union(l_extra_persistent_volume_claims) }}"
- name: Delete temp directory
file:
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
new file mode 100644
index 000000000..346605ff7
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolume definitions
+ template:
+ dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
+ src: persistent-volume.yml.j2
+ when: persistent_volumes | length > 0
+ changed_when: False
+
+- name: Create PersistentVolumes
+ command: >
+ {{ openshift.common.client_binary }} create
+ -f {{ mktemp.stdout }}/persistent-volumes.yml
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ register: pv_create_output
+ when: persistent_volumes | length > 0
+ failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+ changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
new file mode 100644
index 000000000..e44f9b18f
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolumeClaim definitions
+ template:
+ dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
+ src: persistent-volume-claim.yml.j2
+ when: persistent_volume_claims | length > 0
+ changed_when: False
+
+- name: Create PersistentVolumeClaims
+ command: >
+ {{ openshift.common.client_binary }} create
+ -f {{ mktemp.stdout }}/persistent-volume-claims.yml
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ register: pvc_create_output
+ when: persistent_volume_claims | length > 0
+ failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+ changed_when: ('created' in pvc_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index ee9dac7cb..9ec14208b 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -17,5 +17,5 @@ items:
capacity:
storage: "{{ volume.capacity }}"
accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }}
- {{ volume.storage.keys()[0] }}: {{ volume.storage[volume.storage.keys()[0]] | to_padded_yaml(3, 2) }}
+ {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }}
{% endfor %}
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index f1eca1da6..1ebeacabf 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -38,11 +38,13 @@ openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-bl
Each prometheus component (prometheus, alertmanager, alertbuffer) can set pv claim by setting corresponding role variable:
```
openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir)
+openshift_prometheus_<COMPONENT>_storage_class: <VALUE>
openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
```
e.g.
```
openshift_prometheus_storage_type: pvc
+openshift_prometheus_storage_class: glusterfs-storage
openshift_prometheus_alertmanager_pvc_name: alertmanager
openshift_prometheus_alertbuffer_pvc_size: 10G
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
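Assuming the `lib_openshift` `oc_pvc` task that consumes these variables passes `storage_class_name` through as `spec.storageClassName` (see the `install_prometheus.yaml` change below), the claim requested for the main prometheus component with the example above would look roughly like this; all values are placeholders taken from the role defaults.

```yaml
# Hypothetical rendered PersistentVolumeClaim for the prometheus component
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: glusterfs-storage
```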
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index df331a4bb..e30108d2c 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -23,6 +23,7 @@ openshift_prometheus_pvc_name: prometheus
openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
+openshift_prometheus_sc_name: "{{ openshift_prometheus_storage_class | default(None) }}"
# One of ['emptydir', 'pvc']
openshift_prometheus_alertmanager_storage_type: "emptydir"
@@ -30,6 +31,7 @@ openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"
+openshift_prometheus_alertmanager_sc_name: "{{ openshift_prometheus_alertmanager_storage_class | default(None) }}"
# One of ['emptydir', 'pvc']
openshift_prometheus_alertbuffer_storage_type: "emptydir"
@@ -37,6 +39,7 @@ openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}"
+openshift_prometheus_alertbuffer_sc_name: "{{ openshift_prometheus_alertbuffer_storage_class | default(None) }}"
# container resources
openshift_prometheus_cpu_limit: null
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index ad15dc65f..abc5dd476 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -131,6 +131,7 @@
access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_pvc_size }}"
selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_sc_name }}"
when: openshift_prometheus_storage_type == 'pvc'
- name: create alertmanager pvc
@@ -140,6 +141,7 @@
access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_alertmanager_sc_name }}"
when: openshift_prometheus_alertmanager_storage_type == 'pvc'
- name: create alertbuffer pvc
@@ -149,6 +151,7 @@
access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_alertbuffer_sc_name }}"
when: openshift_prometheus_alertbuffer_storage_type == 'pvc'
# prometheus configmap
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 6e8792446..e543d753c 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -66,7 +66,7 @@
- name: "Set anyuid permissions for efs"
command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index be749a2e1..f7bd58db3 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -82,6 +82,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as the default StorageClass
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
@@ -125,13 +126,14 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | Description |
-|-------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
+| Name | Default value | Description |
+|-----------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as the default StorageClass
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
@@ -147,7 +149,6 @@ Dependencies
------------
* os_firewall
-* openshift_hosted_facts
* openshift_repos
* lib_openshift
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 814d6ff28..da34fab2a 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -5,6 +5,7 @@ openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
+openshift_storage_glusterfs_storageclass_default: False
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
@@ -45,12 +46,13 @@ openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if
openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
-openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift_hosted_registry_namespace | default(openshift_storage_glusterfs_namespace) }}"
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
openshift_storage_glusterfs_registry_storageclass: False
+openshift_storage_glusterfs_registry_storageclass_default: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
index 0cdd33880..6a4ef942b 100644
--- a/roles/openshift_storage_glusterfs/meta/main.yml
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -10,6 +10,6 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: openshift_hosted_facts
+- role: openshift_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 4b33e91b4..315bc5614 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -82,7 +82,7 @@
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
-- include: glusterfs_deploy.yml
+- include_tasks: glusterfs_deploy.yml
when: glusterfs_is_native
- name: Create heketi service account
@@ -212,7 +212,7 @@
when:
- glusterfs_heketi_is_native
-- include: heketi_deploy_part1.yml
+- include_tasks: heketi_deploy_part1.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_deploy_is_missing
@@ -256,7 +256,7 @@
when:
- glusterfs_heketi_topology_load
-- include: heketi_deploy_part2.yml
+- include_tasks: heketi_deploy_part2.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_is_missing
@@ -312,8 +312,8 @@
when:
- glusterfs_storageclass or glusterfs_s3_deploy
-- include: glusterblock_deploy.yml
+- include_tasks: glusterblock_deploy.yml
when: glusterfs_block_deploy
-- include: gluster_s3_deploy.yml
+- include_tasks: gluster_s3_deploy.yml
when: glusterfs_s3_deploy
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 71c1311cd..2ea7286f3 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -7,6 +7,7 @@
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
+ glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_storageclass_default | bool }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
@@ -46,4 +47,4 @@
glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_heketi_fstab }}"
glusterfs_nodes: "{{ groups.glusterfs | default([]) }}"
-- include: glusterfs_common.yml
+- include_tasks: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 30e83e79b..0c2fcb2c5 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -23,7 +23,7 @@
state: absent
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ groups.all }}"
- when: glusterfs_wipe
+ when: "'openshift' in hostvars[item] and glusterfs_wipe"
- name: Delete pre-existing GlusterFS config
file:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index d3cba61cf..b7cff6514 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -7,6 +7,7 @@
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
+ glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_registry_storageclass_default | bool }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
@@ -46,7 +47,7 @@
glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}"
glusterfs_nodes: "{% if groups.glusterfs_registry is defined %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}"
-- include: glusterfs_common.yml
+- include_tasks: glusterfs_common.yml
when:
- glusterfs_nodes | default([]) | count > 0
- "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"
@@ -56,5 +57,5 @@
register: registry_volume
- name: Create GlusterFS registry volume
- command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
- when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
+ command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift_hosted_registry_storage_volume_size | replace('Gi','') }} --name={{ openshift_hosted_registry_storage_glusterfs_path }}"
+ when: "openshift_hosted_registry_storage_glusterfs_path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index d2d8c6c10..b48bfc88e 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -5,13 +5,15 @@
changed_when: False
check_mode: no
-- include: glusterfs_config.yml
+- include_tasks: glusterfs_config.yml
when:
- groups.glusterfs | default([]) | count > 0
-- include: glusterfs_registry.yml
- when:
- - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
+- include_tasks: glusterfs_registry.yml
+ when: >
+ groups.glusterfs_registry | default([]) | count > 0
+ or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
+ or (openshift_hosted_registry_storage_glusterfs_swap | default(False))
- name: Delete temp directory
file:
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
index 454e84aaf..5ea17428f 100644
--- a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
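When `glusterfs_storageclass_default` is true, the template above renders roughly the following StorageClass; the name and heketi `resturl` values are placeholders, and the output is trimmed to the fields visible in this hunk.

```yaml
# Illustrative rendered output of glusterfs-storageclass.yml.j2
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: glusterfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://heketi-storage.apps.example.com"
```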
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index 98f7c317e..d61e6873a 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -11,4 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: lib_os_firewall
-- role: openshift_hosted_facts
+- role: openshift_facts
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index c25cad74c..55e4024ec 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -20,25 +20,25 @@
- name: Ensure exports directory exists
file:
- path: "{{ openshift.hosted.registry.storage.nfs.directory }}"
+ path: "{{ openshift_hosted_registry_storage_nfs_directory }}"
state: directory
- name: Ensure export directories exist
file:
- path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}"
+ path: "{{ item }}"
state: directory
mode: 0777
owner: nfsnobody
group: nfsnobody
with_items:
- - "{{ openshift.hosted.registry }}"
- - "{{ openshift.metrics }}"
- - "{{ openshift.logging }}"
- - "{{ openshift.loggingops }}"
- - "{{ openshift.hosted.etcd }}"
- - "{{ openshift.prometheus }}"
- - "{{ openshift.prometheus.alertmanager }}"
- - "{{ openshift.prometheus.alertbuffer }}"
+ - "{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}"
+ - "{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }}"
+ - "{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }}"
+ - "{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }}"
+ - "{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }}"
+ - "{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }}"
+ - "{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }}"
+ - "{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}"
- name: Configure exports
template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index c2a741035..2ec8db019 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,8 +1,8 @@
-{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
-{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
-{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
-{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
-{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
-{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
-{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
+{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }} {{ openshift_hosted_registry_storage_nfs_options }}
+{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }} {{ openshift_metrics_storage_nfs_options }}
+{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }} {{ openshift_logging_storage_nfs_options }}
+{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }} {{ openshift_loggingops_storage_nfs_options }}
+{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }} {{ openshift_hosted_etcd_storage_nfs_options }}
+{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }} {{ openshift_prometheus_storage_nfs_options }}
+{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }} {{ openshift_prometheus_alertmanager_storage_nfs_options }}
+{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }} {{ openshift_prometheus_alertbuffer_storage_nfs_options }}
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index 49dd657b5..c8e7b6d7c 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -20,7 +20,7 @@
file: path={{osnl_mount_dir}}/{{ item }} owner=nfsnobody group=nfsnobody mode=0700
with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d
-- include: nfs.yml
+- include_tasks: nfs.yml
- name: Create volume json file
template: src=../templates/nfs.json.j2 dest=/root/persistent-volume.{{ item }}.json
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..354699637 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,10 @@
---
openshift_protect_installed_version: True
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
+
+openshift_use_crio_only: False
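For clarity, a small sketch of how the new lookup resolves; the deployment type value is only an example.

```yaml
# With this inventory setting ...
openshift_deployment_type: openshift-enterprise
# ... openshift_service_type evaluates to
#   openshift_service_type_dict['openshift-enterprise']  ->  'atomic-openshift'
# while an 'origin' deployment resolves to 'origin'.
```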
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 5d7683120..d0ad4b7d2 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 4f9158ade..ae0f68a5b 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -101,13 +101,13 @@
when: is_containerized | bool
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
@@ -196,7 +196,7 @@
- openshift_version.startswith(openshift_release) | bool
msg: |-
You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }}
on host {{ inventory_hostname }}.
We will only install the latest RPMs, so please ensure you are getting the release
you expect. You may need to adjust your Ansible inventory, modify the repositories
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index c40777bf1..c7ca5ceae 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -8,14 +8,14 @@
- openshift_version is not defined
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
diff --git a/roles/rhel_subscribe/README.md b/roles/rhel_subscribe/README.md
new file mode 100644
index 000000000..15eaf4f30
--- /dev/null
+++ b/roles/rhel_subscribe/README.md
@@ -0,0 +1,29 @@
+RHEL Subscribe
+==============
+
+Subscribes the RHEL servers and adds the OpenShift Enterprise repos.
+
+Role variables
+--------------
+
+### `rhsub_user`
+
+Username for the subscription-manager.
+
+### `rhsub_pass`
+
+Password for the subscription-manager.
+
+### `rhsub_pool`
+
+Name of the pool to attach (optional).
+
+### `rhsub_server`
+
+Custom hostname for the Satellite server (optional).
+
+### `openshift_release`
+
+Version for the OpenShift Enterprise repositories.
+
+Example: `3.6`
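An illustrative set of inventory variables for this role; the credentials and hostname are placeholders, and `rhsub_server` is only needed for Satellite-managed hosts.

```yaml
rhsub_user: example-user
rhsub_pass: example-password
rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*'
rhsub_server: satellite.example.com  # optional
openshift_release: '3.6'
```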
diff --git a/roles/rhel_subscribe/defaults/main.yml b/roles/rhel_subscribe/defaults/main.yml
new file mode 100644
index 000000000..80b2ab919
--- /dev/null
+++ b/roles/rhel_subscribe/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*'
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index fa74c9953..8acdfb969 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -1,25 +1,18 @@
---
-- name: Disable all repositories
- command: subscription-manager repos --disable="*"
-
-- set_fact:
- default_ose_version: '3.6'
- when: deployment_type == 'openshift-enterprise'
-
- set_fact:
- ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}"
-
-- fail:
- msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
+ openshift_release: "{{ openshift_release[1:] }}"
when:
- - deployment_type == 'openshift-enterprise'
- - ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] )
+ - openshift_release is defined
+ - openshift_release[0] == 'v'
+
+- name: Disable all repositories
+ command: subscription-manager repos --disable="*"
- name: Enable RHEL repositories
command: subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-{{ ose_version }}-rpms" \
+ --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \
--enable="rhel-7-fast-datapath-rpms"
register: subscribe_repos
until: subscribe_repos | succeeded
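For clarity, a sketch of how the OSE repository name is derived from `openshift_release` by the expression above; the release value is an example.

```yaml
# openshift_release: 'v3.7.0'  -> leading 'v' stripped by the set_fact above -> '3.7.0'
# '3.7.0'.split('.')[0:2] | join('.')  -> '3.7'
# enabled repository: rhel-7-server-ose-3.7-rpms
```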
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 9ca49b569..3466b7e44 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -3,23 +3,17 @@
# to make it able to attach to a pool
# to make it able to enable repositories
-- set_fact:
- rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}"
- rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}"
- rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}"
- rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}"
-
- fail:
msg: "This role is only supported for Red Hat hosts"
when: ansible_distribution != 'RedHat'
- fail:
- msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role.
- when: rhel_subscription_user is not defined
+ msg: The rhsub_user variable is required for this role.
+ when: rhsub_user is not defined or not rhsub_user
- fail:
- msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role.
- when: rhel_subscription_pass is not defined
+ msg: The rhsub_pass variable is required for this role.
+ when: rhsub_pass is not defined or not rhsub_pass
- name: Detecting Atomic Host Operating System
stat:
@@ -27,10 +21,10 @@
register: ostree_booted
- name: Satellite preparation
- command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
args:
creates: /etc/rhsm/ca/katello-server-ca.pem
- when: rhel_subscription_server is defined and rhel_subscription_server
+ when: rhsub_server is defined and rhsub_server
- name: Install Red Hat Subscription manager
yum:
@@ -41,26 +35,26 @@
- name: RedHat subscriptions
redhat_subscription:
- username: "{{ rhel_subscription_user }}"
- password: "{{ rhel_subscription_pass }}"
+ username: "{{ rhsub_user }}"
+ password: "{{ rhsub_pass }}"
register: rh_subscription
until: rh_subscription | succeeded
- name: Retrieve the OpenShift Pool ID
- command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
+ command: subscription-manager list --available --matches="{{ rhsub_pool }}" --pool-only
register: openshift_pool_id
until: openshift_pool_id | succeeded
changed_when: False
- name: Determine if OpenShift Pool Already Attached
- command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
+ command: subscription-manager list --consumed --matches="{{ rhsub_pool }}" --pool-only
register: openshift_pool_attached
until: openshift_pool_attached | succeeded
changed_when: False
when: openshift_pool_id.stdout == ''
- fail:
- msg: "Unable to find pool matching {{ rhel_subscription_pool }} in available or consumed pools"
+ msg: "Unable to find pool matching {{ rhsub_pool }} in available or consumed pools"
when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
- name: Attach to OpenShift Pool
@@ -69,7 +63,6 @@
until: subscribe_pool | succeeded
when: openshift_pool_id.stdout != ''
-- include: enterprise.yml
+- include_tasks: enterprise.yml
when:
- - deployment_type == 'openshift-enterprise'
- not ostree_booted.stat.exists | bool