-rw-r--r--  .papr.inventory (renamed from .redhat-ci.inventory) | 0
-rwxr-xr-x  .papr.sh (renamed from .redhat-ci.sh) | 6
-rw-r--r--  .papr.yml | 42
-rw-r--r--  .redhat-ci.yml | 30
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  BUILD.md | 10
-rw-r--r--  README_CONTAINER_IMAGE.md | 16
-rw-r--r--  ansible.cfg | 1
-rw-r--r--  docs/pull_requests.md | 8
-rw-r--r--  docs/repo_structure.md | 13
-rw-r--r--  examples/certificate-check-upload.yaml | 6
-rw-r--r--  examples/certificate-check-volume.yaml | 6
-rw-r--r--  examples/scheduled-certcheck-upload.yaml | 2
-rw-r--r--  examples/scheduled-certcheck-volume.yaml | 2
-rw-r--r--  filter_plugins/oo_filters.py | 17
-rwxr-xr-x  hack/build-images.sh | 4
-rwxr-xr-x  hack/push-release.sh | 2
-rw-r--r--  images/installer/Dockerfile | 8
-rw-r--r--  images/installer/Dockerfile.rhel7 | 2
-rw-r--r--  images/installer/system-container/root/exports/config.json.template | 16
-rw-r--r--  images/installer/system-container/root/exports/manifest.json | 2
-rw-r--r--  inventory/byo/hosts.byo.native-glusterfs.example | 2
-rw-r--r--  inventory/byo/hosts.origin.example | 33
-rw-r--r--  inventory/byo/hosts.ose.example | 33
-rw-r--r--  openshift-ansible.spec | 155
-rw-r--r--  playbooks/adhoc/uninstall.yml | 12
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 5
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml | 158
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/ca.yml) | 155
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml | 33
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 72
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 10
-rw-r--r--  playbooks/common/openshift-master/config.yml | 1
-rw-r--r--  playbooks/common/openshift-node/config.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 2
-rw-r--r--  requirements.txt | 1
-rw-r--r--  roles/calico/defaults/main.yaml | 13
-rw-r--r--  roles/calico/tasks/gen_certs.yml | 17
-rw-r--r--  roles/calico/tasks/main.yml | 39
-rw-r--r--  roles/calico/templates/10-calico.conf.j2 | 2
-rw-r--r--  roles/calico/templates/calico.service.j2 | 4
-rw-r--r--  roles/calico/templates/calicoctl.cfg.j2 | 2
-rw-r--r--  roles/calico_master/defaults/main.yaml | 1
-rw-r--r--  roles/calico_master/templates/calico-policy-controller.yml.j2 | 8
-rw-r--r--  roles/docker/README.md | 2
-rw-r--r--  roles/docker/tasks/package_docker.yml | 15
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 10
-rw-r--r--  roles/etcd/tasks/main.yml | 3
-rw-r--r--  roles/etcd/tasks/system_container.yml | 52
-rw-r--r--  roles/etcd_common/defaults/main.yml | 16
-rw-r--r--  roles/etcd_common/tasks/backup.yml (renamed from roles/etcd_upgrade/tasks/backup.yml) | 33
-rw-r--r--  roles/etcd_common/tasks/drop_etcdctl.yml (renamed from roles/etcd_common/tasks/etcdctl.yml) | 0
-rw-r--r--  roles/etcd_common/tasks/main.yml | 9
-rw-r--r--  roles/etcd_server_certificates/tasks/main.yml | 37
-rw-r--r--  roles/etcd_upgrade/defaults/main.yml | 6
-rw-r--r--  roles/etcd_upgrade/meta/main.yml | 1
-rw-r--r--  roles/etcd_upgrade/tasks/main.yml | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 54
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 58
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 44
-rw-r--r--  roles/lib_openshift/src/class/oc_obj.py | 10
-rw-r--r--  roles/lib_openshift/src/class/oc_secret.py | 14
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 44
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_secret.py | 1
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 32
-rw-r--r--  roles/openshift_ca/vars/main.yml | 3
-rw-r--r--  roles/openshift_default_storage_class/README.md | 39
-rw-r--r--  roles/openshift_default_storage_class/defaults/main.yml | 14
-rw-r--r--  roles/openshift_default_storage_class/meta/main.yml | 15
-rw-r--r--  roles/openshift_default_storage_class/tasks/main.yml | 19
-rw-r--r--  roles/openshift_default_storage_class/vars/main.yml | 1
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 3
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json | 28
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json | 20
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 36
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 12
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 23
-rwxr-xr-x  roles/openshift_health_checker/library/aos_version.py | 105
-rw-r--r--  roles/openshift_health_checker/library/ocutil.py | 74
-rw-r--r--  roles/openshift_health_checker/library/rpm_version.py | 127
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 22
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 172
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py | 185
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/__init__.py | 0
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/curator.py | 61
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 217
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/fluentd.py | 170
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/kibana.py | 229
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py | 96
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 45
-rw-r--r--  roles/openshift_health_checker/openshift_checks/ovs_version.py | 78
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 3
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_update.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 110
-rw-r--r--  roles/openshift_health_checker/test/aos_version_test.py | 87
-rw-r--r--  roles/openshift_health_checker/test/curator_test.py | 68
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 123
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py | 224
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py | 180
-rw-r--r--  roles/openshift_health_checker/test/fluentd_test.py | 109
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py | 218
-rw-r--r--  roles/openshift_health_checker/test/logging_check_test.py | 137
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 89
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 126
-rw-r--r--  roles/openshift_health_checker/test/rpm_version_test.py | 82
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml | 16
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 3
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 7
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 7
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 60
-rw-r--r--  roles/openshift_logging_kibana/templates/oauth-client.j2 | 6
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 2
-rw-r--r--  roles/openshift_master/tasks/main.yml | 1
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 6
-rw-r--r--  roles/openshift_metrics/README.md | 3
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 9
-rw-r--r--  roles/openshift_metrics/tasks/generate_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/install_hosa.yaml | 44
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 37
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 3
-rw-r--r--  roles/openshift_metrics/tasks/oc_apply.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/setup_certificate.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_hosa.yaml | 15
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 | 54
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 | 91
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 | 25
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 | 7
-rw-r--r--  roles/openshift_metrics/templates/pvc.j2 | 7
-rw-r--r--  roles/openshift_node/handlers/main.yml | 11
-rw-r--r--  roles/openshift_node/tasks/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 17
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/rpm_upgrade.yml | 15
l---------  roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2 | 1
l---------  roles/openshift_node_upgrade/templates/origin-node.service.j2 | 1
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 6
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 52
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 83
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 16
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml | 50
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml | 31
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml | 41
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 108
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 36
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 29
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 21
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 68
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 5
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 3
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2 (renamed from roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml) | 2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 | 10
-rw-r--r--  test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile | 2
-rw-r--r--  test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec | 17
-rw-r--r--  test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec | 17
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml | 4
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml | 4
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml | 2
-rw-r--r--  test/integration/openshift_health_checker/preflight/preflight_test.go | 2
216 files changed, 5440 insertions(+), 1671 deletions(-)
diff --git a/.redhat-ci.inventory b/.papr.inventory
index 23bc9923c..23bc9923c 100644
--- a/.redhat-ci.inventory
+++ b/.papr.inventory
diff --git a/.redhat-ci.sh b/.papr.sh
index fce8c1d52..fe0b97b68 100755
--- a/.redhat-ci.sh
+++ b/.papr.sh
@@ -1,10 +1,12 @@
#!/bin/bash
set -xeuo pipefail
+echo "Targeting OpenShift Origin $OPENSHIFT_IMAGE_TAG"
+
pip install -r requirements.txt
# ping the nodes to check they're responding and register their ostree versions
-ansible -vvv -i .redhat-ci.inventory nodes -a 'rpm-ostree status'
+ansible -vvv -i .papr.inventory nodes -a 'rpm-ostree status'
upload_journals() {
mkdir journals
@@ -16,7 +18,7 @@ upload_journals() {
trap upload_journals ERR
# run the actual installer
-ansible-playbook -vvv -i .redhat-ci.inventory playbooks/byo/config.yml
+ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml
# run a small subset of origin conformance tests to sanity
# check the cluster NB: we run it on the master since we may
diff --git a/.papr.yml b/.papr.yml
new file mode 100644
index 000000000..16d6e78b1
--- /dev/null
+++ b/.papr.yml
@@ -0,0 +1,42 @@
+---
+
+# This YAML file is used by PAPR. It details the test
+# environment to provision and the test procedure. For more
+# information on PAPR, see:
+#
+# https://github.com/projectatomic/papr
+#
+# The PAPR YAML specification detailing allowed fields can
+# be found at:
+#
+# https://github.com/projectatomic/papr/blob/master/sample.papr.yml
+
+cluster:
+ hosts:
+ - name: ocp-master
+ distro: fedora/25/atomic
+ - name: ocp-node1
+ distro: fedora/25/atomic
+ - name: ocp-node2
+ distro: fedora/25/atomic
+ container:
+ image: fedora:25
+
+packages:
+ - gcc
+ - python-pip
+ - python-devel
+ - libffi-devel
+ - openssl-devel
+ - redhat-rpm-config
+
+context: 'fedora/25/atomic'
+
+env:
+ OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1
+
+tests:
+ - ./.papr.sh
+
+artifacts:
+ - journals/
diff --git a/.redhat-ci.yml b/.redhat-ci.yml
deleted file mode 100644
index 6dac7b256..000000000
--- a/.redhat-ci.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-
-cluster:
- hosts:
- - name: ocp-master
- distro: fedora/25/atomic
- - name: ocp-node1
- distro: fedora/25/atomic
- - name: ocp-node2
- distro: fedora/25/atomic
- container:
- image: fedora:25
-
-packages:
- - gcc
- - python-pip
- - python-devel
- - openssl-devel
- - redhat-rpm-config
-
-context: 'fedora/25/atomic | origin/v3.6.0-alpha.1'
-
-env:
- OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1
-
-tests:
- - ./.redhat-ci.sh
-
-artifacts:
- - journals/
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index c4302af24..a8f7dbd63 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.89.0-1 ./
+3.6.110-1 ./
diff --git a/BUILD.md b/BUILD.md
index 7ac0187ee..d9de3f5aa 100644
--- a/BUILD.md
+++ b/BUILD.md
@@ -26,16 +26,18 @@ tito build --rpm
## Build an openshift-ansible container image
+**NOTE**: the examples below use "openshift-ansible" as the name of the image to build for simplicity and illustration purposes, and also to prevent potential confusion between custom built images and official releases. See [README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md) for details about the released container images for openshift-ansible.
+
To build a container image of `openshift-ansible` using standalone **Docker**:
cd openshift-ansible
- docker build -f images/installer/Dockerfile -t openshift/openshift-ansible .
+ docker build -f images/installer/Dockerfile -t openshift-ansible .
### Building on OpenShift
To build an openshift-ansible image using an **OpenShift** [build and image stream](https://docs.openshift.org/latest/architecture/core_concepts/builds_and_image_streams.html) the straightforward command would be:
- oc new-build docker.io/aweiteka/playbook2image~https://github.com/openshift/openshift-ansible
+ oc new-build registry.centos.org/openshift/playbook2image~https://github.com/openshift/openshift-ansible
However, because the `Dockerfile` for this repository is not in the top level directory, and because we can't change the build context to the `images/installer` path as it would cause the build to fail, the `oc new-build` command above will create a build configuration using the *source to image* strategy, which is the default approach of the [playbook2image](https://github.com/openshift/playbook2image) base image. This does build an image successfully, but unfortunately the resulting image will be missing some customizations that are handled by the [Dockerfile](images/installer/Dockerfile) in this repo.
@@ -48,7 +50,7 @@ At the time of this writing there is no straightforward option to [set the docke
```
curl -s https://raw.githubusercontent.com/openshift/openshift-ansible/master/images/installer/Dockerfile |
oc new-build -D - \
- --docker-image=docker.io/aweiteka/playbook2image \
+ --docker-image=registry.centos.org/openshift/playbook2image \
https://github.com/openshift/openshift-ansible
```
@@ -76,5 +78,5 @@ Once the container image is built, we can import it into the OSTree
storage:
```
-atomic pull --storage ostree docker:openshift/openshift-ansible:latest
+atomic pull --storage ostree docker:openshift-ansible:latest
```
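Once pulled, the locally built image can be installed and started as a system container using the same `atomic`/`systemctl` workflow described in [README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md). A minimal sketch, assuming an inventory file named `inventory.origin` in the current directory and that the service unit takes the image's name:

```
atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift-ansible
systemctl start openshift-ansible
```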
diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md
index 0d7f7f4af..cf3b432df 100644
--- a/README_CONTAINER_IMAGE.md
+++ b/README_CONTAINER_IMAGE.md
@@ -6,6 +6,12 @@ The image is designed to **run as a non-root user**. The container's UID is mapp
**Note**: at this time there are known issues that prevent running this image for installation/upgrade purposes (i.e. running one of the config/upgrade playbooks) from within one of the hosts that is also an installation target: if the playbook you want to run attempts to manage and restart the docker daemon (as install/upgrade playbooks do), the restart will kill the container itself mid-operation.
+## A note about the name of the image
+
+The released container images for openshift-ansible follow the naming scheme determined by OpenShift's `imageConfig.format` configuration option. This means that the released image name is `openshift/origin-ansible` instead of `openshift/openshift-ansible`.
+
+This provides consistency with other images used by the platform and it's also a requirement for some use cases like using the image from [`oc cluster up`](https://github.com/openshift/origin/blob/master/docs/cluster_up_down.md).
+
## Usage
The `playbook2image` base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/openshift/playbook2image) documentation.
@@ -26,7 +32,7 @@ Here is an example of how to run a containerized `openshift-ansible` playbook th
-e INVENTORY_FILE=/tmp/inventory \
-e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/default.yaml \
-e OPTS="-v" -t \
- openshift/openshift-ansible
+ openshift/origin-ansible
You might want to adjust some of the options in the example to match your environment and/or preferences. For example: you might want to create a separate directory on the host where you'll copy the ssh key and inventory files prior to invocation to avoid unwanted SELinux re-labeling of the original files or paths (see below).
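For reference, a complete invocation along the lines of the hunk above might look as follows. This is a sketch only: the ssh key and inventory paths (`~/.ssh/id_rsa`, `/etc/ansible/hosts`) are illustrative and not taken from the diff.

```sh
# Run the certificate-expiry check playbook from the container;
# mount the ssh key and inventory from the host (paths illustrative).
docker run -u "$(id -u)" \
    -v "$HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z" \
    -v /etc/ansible/hosts:/tmp/inventory \
    -e INVENTORY_FILE=/tmp/inventory \
    -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/default.yaml \
    -e OPTS="-v" -t \
    openshift/origin-ansible
```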
@@ -46,7 +52,7 @@ Here is a detailed explanation of the options used in the command above:
Further usage examples are available in the [examples directory](examples/) with samples of how to use the image from within OpenShift.
-Additional usage information for images built from `playbook2image` like this one can be found in the [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples).
+Additional usage information for images built from `playbook2image` like this one can be found in the [playbook2image examples](https://github.com/openshift/playbook2image/tree/master/examples).
## Running openshift-ansible as a System Container
@@ -59,8 +65,8 @@ If the inventory file needs additional files then it can use the path `/var/lib/
Run the ansible system container:
```sh
-atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift/openshift-ansible
-systemctl start openshift-ansible
+atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift/origin-ansible
+systemctl start origin-ansible
```
The `INVENTORY_FILE` variable tells the installer which inventory file on the host will be bind mounted inside the container. In the example above, a file called `inventory.origin` in the current directory is used as the inventory file for the installer.
@@ -68,5 +74,5 @@ The `INVENTORY_FILE` variable says to the installer what inventory file on the h
And to finally cleanup the container:
```
-atomic uninstall openshift-ansible
+atomic uninstall origin-ansible
```
diff --git a/ansible.cfg b/ansible.cfg
index 034733684..0c74d63da 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -14,6 +14,7 @@ callback_plugins = callback_plugins/
forks = 20
host_key_checking = False
retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
nocows = True
# Uncomment to use the provided BYO inventory
diff --git a/docs/pull_requests.md b/docs/pull_requests.md
index fcc3e275c..45ae01a9d 100644
--- a/docs/pull_requests.md
+++ b/docs/pull_requests.md
@@ -10,8 +10,8 @@ Whenever a
[Pull Request is opened](../CONTRIBUTING.md#submitting-contributions), some
automated test jobs must be successfully run before the PR can be merged.
-Some of these jobs are automatically triggered, e.g., Travis and Coveralls.
-Other jobs need to be manually triggered by a member of the
+Some of these jobs are automatically triggered, e.g., Travis, PAPR, and
+Coveralls. Other jobs need to be manually triggered by a member of the
[Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors).
## Triggering tests
@@ -48,9 +48,9 @@ simplifying the workflow towards a single infrastructure in the future.
There are a set of tests that run on Fedora infrastructure. They are started
automatically with every pull request.
-They are implemented using the [`redhat-ci` framework](https://github.com/jlebon/redhat-ci).
+They are implemented using the [`PAPR` framework](https://github.com/projectatomic/papr).
-To re-run tests, write a comment containing `bot, retest this please`.
+To re-run tests, write a comment containing only `bot, retest this please`.
## Triggering merge
diff --git a/docs/repo_structure.md b/docs/repo_structure.md
index 693837fba..f598f22c3 100644
--- a/docs/repo_structure.md
+++ b/docs/repo_structure.md
@@ -52,3 +52,16 @@ These are plugins used in playbooks and roles:
.
└── test Contains tests.
```
+
+### CI
+
+These files are used by [PAPR](https://github.com/projectatomic/papr),
+which is similar in workflow to Travis: the test
+environment and test scripts are defined in a YAML file.
+
+```
+.
+├── .papr.yml
+├── .papr.sh
+└── .papr.inventory
+```
diff --git a/examples/certificate-check-upload.yaml b/examples/certificate-check-upload.yaml
index 8b560447f..1794cb096 100644
--- a/examples/certificate-check-upload.yaml
+++ b/examples/certificate-check-upload.yaml
@@ -4,10 +4,10 @@
# The generated reports are uploaded to a location in the master
# hosts, using the playbook 'easy-mode-upload.yaml'.
#
-# This example uses the openshift/openshift-ansible container image.
+# This example uses the openshift/origin-ansible container image.
# (see README_CONTAINER_IMAGE.md in the top level dir for more details).
#
-# The following objects are xpected to be configured before the creation
+# The following objects are expected to be configured before the creation
# of this Job:
# - A ConfigMap named 'inventory' with a key named 'hosts' that
# contains the Ansible inventory file
@@ -28,7 +28,7 @@ spec:
spec:
containers:
- name: openshift-ansible
- image: openshift/openshift-ansible
+ image: openshift/origin-ansible
env:
- name: PLAYBOOK_FILE
value: playbooks/certificate_expiry/easy-mode-upload.yaml
diff --git a/examples/certificate-check-volume.yaml b/examples/certificate-check-volume.yaml
index f6613bcd8..dd0a89c8e 100644
--- a/examples/certificate-check-volume.yaml
+++ b/examples/certificate-check-volume.yaml
@@ -4,10 +4,10 @@
# The generated reports are stored in a Persistent Volume using
# the playbook 'html_and_json_timestamp.yaml'.
#
-# This example uses the openshift/openshift-ansible container image.
+# This example uses the openshift/origin-ansible container image.
# (see README_CONTAINER_IMAGE.md in the top level dir for more details).
#
-# The following objects are xpected to be configured before the creation
+# The following objects are expected to be configured before the creation
# of this Job:
# - A ConfigMap named 'inventory' with a key named 'hosts' that
# contains the Ansible inventory file
@@ -30,7 +30,7 @@ spec:
spec:
containers:
- name: openshift-ansible
- image: openshift/openshift-ansible
+ image: openshift/origin-ansible
env:
- name: PLAYBOOK_FILE
value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
diff --git a/examples/scheduled-certcheck-upload.yaml b/examples/scheduled-certcheck-upload.yaml
index b0a97361b..05890a357 100644
--- a/examples/scheduled-certcheck-upload.yaml
+++ b/examples/scheduled-certcheck-upload.yaml
@@ -28,7 +28,7 @@ spec:
spec:
containers:
- name: openshift-ansible
- image: openshift/openshift-ansible
+ image: openshift/origin-ansible
env:
- name: PLAYBOOK_FILE
value: playbooks/certificate_expiry/easy-mode-upload.yaml
diff --git a/examples/scheduled-certcheck-volume.yaml b/examples/scheduled-certcheck-volume.yaml
index 74cdc9e7f..2f26e8809 100644
--- a/examples/scheduled-certcheck-volume.yaml
+++ b/examples/scheduled-certcheck-volume.yaml
@@ -28,7 +28,7 @@ spec:
spec:
containers:
- name: openshift-ansible
- image: openshift/openshift-ansible
+ image: openshift/origin-ansible
env:
- name: PLAYBOOK_FILE
value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 8b279981d..cff9f8a60 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -715,7 +715,7 @@ def oo_openshift_env(hostvars):
return facts
-# pylint: disable=too-many-branches, too-many-nested-blocks
+# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables.
@@ -747,10 +747,15 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
volume = params['volume']['name']
path = directory + '/' + volume
size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
access_modes = params['access']['modes']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
+ labels=labels,
access_modes=access_modes,
storage=dict(
nfs=dict(
@@ -760,12 +765,17 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
elif kind == 'openstack':
volume = params['volume']['name']
size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
access_modes = params['access']['modes']
filesystem = params['openstack']['filesystem']
volume_id = params['openstack']['volumeID']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
+ labels=labels,
access_modes=access_modes,
storage=dict(
cinder=dict(
@@ -775,6 +785,10 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
elif kind == 'glusterfs':
volume = params['volume']['name']
size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
access_modes = params['access']['modes']
endpoints = params['glusterfs']['endpoints']
path = params['glusterfs']['path']
@@ -782,6 +796,7 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
+ labels=labels,
access_modes=access_modes,
storage=dict(
glusterfs=dict(
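The labels-defaulting conditional added above is repeated verbatim in the nfs, openstack, and glusterfs branches. A more concise equivalent, shown here only as a sketch and not as the merged code, would lean on `dict.get`:

```python
def volume_labels(params):
    """Return PV labels from the storage parameters, defaulting to an
    empty dict -- equivalent to the repeated 'labels' conditionals above."""
    return params.get('labels', {})


# The default applies when the inventory defines no labels:
print(volume_labels({'volume': {'name': 'metrics'},
                     'labels': {'storage': 'metrics'}}))  # {'storage': 'metrics'}
print(volume_labels({'volume': {'name': 'logging'}}))     # {}
```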
diff --git a/hack/build-images.sh b/hack/build-images.sh
index 3e9896caa..ce421178f 100755
--- a/hack/build-images.sh
+++ b/hack/build-images.sh
@@ -7,7 +7,7 @@ set -o pipefail
STARTTIME=$(date +%s)
source_root=$(dirname "${0}")/..
-prefix="openshift/openshift-ansible"
+prefix="openshift/origin-ansible"
version="latest"
verbose=false
options="-f images/installer/Dockerfile"
@@ -44,7 +44,7 @@ if [ "$help" = true ]; then
echo "Options: "
echo " --prefix=PREFIX"
echo " The prefix to use for the image names."
- echo " default: openshift/openshift-ansible"
+ echo " default: openshift/origin-ansible"
echo
echo " --version=VERSION"
echo " The version used to tag the image"
diff --git a/hack/push-release.sh b/hack/push-release.sh
index 8639143af..131ed83ca 100755
--- a/hack/push-release.sh
+++ b/hack/push-release.sh
@@ -12,7 +12,7 @@ set -o pipefail
STARTTIME=$(date +%s)
OS_ROOT=$(dirname "${BASH_SOURCE}")/..
-PREFIX="${PREFIX:-openshift/openshift-ansible}"
+PREFIX="${PREFIX:-openshift/origin-ansible}"
# Go to the top of the tree.
cd "${OS_ROOT}"
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index f6af018ca..915dfe377 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -1,11 +1,11 @@
# Using playbook2image as a base
-# See https://github.com/aweiteka/playbook2image for details on the image
+# See https://github.com/openshift/playbook2image for details on the image
# including documentation for the settings/env vars referenced below
-FROM docker.io/aweiteka/playbook2image:latest
+FROM registry.centos.org/openshift/playbook2image:latest
MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
-LABEL name="openshift-ansible" \
+LABEL name="openshift/origin-ansible" \
summary="OpenShift's installation and configuration tool" \
description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
url="https://github.com/openshift/openshift-ansible" \
@@ -22,7 +22,7 @@ USER root
# configurations for the two images.
RUN mkdir -p /usr/share/ansible/ && ln -s /opt/app-root/src /usr/share/ansible/openshift-ansible
-RUN INSTALL_PKGS="skopeo" && \
+RUN INSTALL_PKGS="skopeo openssl java-1.8.0-openjdk-headless httpd-tools" && \
yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
rpm -V $INSTALL_PKGS && \
yum clean all
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 00841e660..9d7eeec24 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -2,7 +2,7 @@ FROM openshift3/playbook2image
MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
-LABEL name="openshift3/openshift-ansible" \
+LABEL name="openshift3/ose-ansible" \
summary="OpenShift's installation and configuration tool" \
description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
url="https://github.com/openshift/openshift-ansible" \
diff --git a/images/installer/system-container/root/exports/config.json.template b/images/installer/system-container/root/exports/config.json.template
index 383e3696e..397ac941a 100644
--- a/images/installer/system-container/root/exports/config.json.template
+++ b/images/installer/system-container/root/exports/config.json.template
@@ -102,7 +102,7 @@
},
{
"type": "bind",
- "source": "$SSH_ROOT",
+ "source": "$HOME_ROOT/.ssh",
"destination": "/opt/app-root/src/.ssh",
"options": [
"bind",
@@ -112,8 +112,8 @@
},
{
"type": "bind",
- "source": "$SSH_ROOT",
- "destination": "/root/.ssh",
+ "source": "$HOME_ROOT",
+ "destination": "/root",
"options": [
"bind",
"rw",
@@ -171,6 +171,16 @@
]
},
{
+ "destination": "/etc/resolv.conf",
+ "type": "bind",
+ "source": "/etc/resolv.conf",
+ "options": [
+ "ro",
+ "rbind",
+ "rprivate"
+ ]
+ },
+ {
"destination": "/sys/fs/cgroup",
"type": "cgroup",
"source": "cgroup",
diff --git a/images/installer/system-container/root/exports/manifest.json b/images/installer/system-container/root/exports/manifest.json
index 1db845965..f735494d4 100644
--- a/images/installer/system-container/root/exports/manifest.json
+++ b/images/installer/system-container/root/exports/manifest.json
@@ -5,7 +5,7 @@
"VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
"VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
"PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
- "SSH_ROOT": "/root/.ssh",
+ "HOME_ROOT": "/root",
"INVENTORY_FILE": "/dev/null"
}
}
diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.native-glusterfs.example
index 2dbb57d40..dc847a5b2 100644
--- a/inventory/byo/hosts.byo.native-glusterfs.example
+++ b/inventory/byo/hosts.byo.native-glusterfs.example
@@ -24,7 +24,7 @@ glusterfs
[OSEv3:vars]
ansible_ssh_user=root
-deployment_type=origin
+openshift_deployment_type=origin
# Specify that we want to use GlusterFS storage for a hosted registry
openshift_hosted_registry_storage_kind=glusterfs
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 206ec06c3..86b4de4b7 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -501,6 +501,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -512,6 +513,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -545,6 +547,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -556,6 +559,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -799,6 +803,35 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
#etcd_ca_default_days=1825
+# Upgrade Control
+#
+# By default nodes are upgraded serially, one at a time, and all failures are
+# fatal. There is one set of variables for normal nodes and another for nodes
+# that are part of the control plane, as the number of hosts may differ
+# between those two groups.
+#openshift_upgrade_nodes_serial=1
+#openshift_upgrade_nodes_max_fail_percentage=0
+#openshift_upgrade_control_plane_nodes_serial=1
+#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
+#
+# You can specify the number of nodes to upgrade at once. We do not currently
+# attempt to verify that you have the capacity to drain this many nodes at
+# once, so please be careful when specifying these values. You should also
+# verify that the expected number of nodes are all schedulable and ready
+# before starting an upgrade. If it's not possible to drain the requested
+# nodes, the upgrade will stall until the drain succeeds.
+#
+# If you're upgrading more than one node at a time you can specify the maximum
+# percentage of failures within the batch before the upgrade is aborted. Any
+# nodes that do fail are ignored for the rest of the playbook run; you should
+# take care to investigate each failure and return the node to service so
+# that your cluster can operate at full capacity.
+#
+# The batch's failure percentage must strictly exceed the configured value
+# for the upgrade to abort. This would abort on two failures:
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
+# whereas this would not:
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
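The abort rule in the new comment block is easy to make concrete. Here is a small sketch of the arithmetic, mirroring Ansible's `max_fail_percentage` semantics (a batch aborts only when its failure percentage strictly exceeds the configured value):

```python
def batch_aborts(batch_size, failures, max_fail_percentage):
    """True when a batch's failure percentage strictly exceeds the limit,
    which is when Ansible aborts the play."""
    return 100.0 * failures / batch_size > max_fail_percentage


# Two failures in a batch of four (openshift_upgrade_nodes_serial=4) is 50%:
print(batch_aborts(4, 2, 49))  # True  -> 50% > 49%, the upgrade aborts
print(batch_aborts(4, 2, 50))  # False -> 50% is not > 50%, it continues
```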
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 4f777c330..cbaf22810 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -501,6 +501,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -512,6 +513,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -545,6 +547,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -556,6 +559,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -795,6 +799,35 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
#etcd_ca_default_days=1825
+# Upgrade Control
+#
+# By default nodes are upgraded serially, one at a time, and all failures are
+# fatal. There is one set of variables for normal nodes and another for nodes
+# that are part of the control plane, as the number of hosts may differ
+# between those two groups.
+#openshift_upgrade_nodes_serial=1
+#openshift_upgrade_nodes_max_fail_percentage=0
+#openshift_upgrade_control_plane_nodes_serial=1
+#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
+#
+# You can specify the number of nodes to upgrade at once. We do not currently
+# attempt to verify that you have the capacity to drain this many nodes at
+# once, so please be careful when specifying these values. You should also
+# verify that the expected number of nodes are all schedulable and ready
+# before starting an upgrade. If it's not possible to drain the requested
+# nodes, the upgrade will stall until the drain succeeds.
+#
+# If you're upgrading more than one node at a time you can specify the maximum
+# percentage of failures within the batch before the upgrade is aborted. Any
+# nodes that do fail are ignored for the rest of the playbook run; you should
+# take care to investigate each failure and return the node to service so
+# that your cluster can operate at full capacity.
+#
+# The batch's failure percentage must strictly exceed the configured value
+# for the upgrade to abort. This would abort on two failures:
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
+# whereas this would not:
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 06c0d2cba..77c177222 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.89.0
+Version: 3.6.110
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -280,6 +280,159 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.110-1
+- papr: add documentation to YAML and simplify context (jlebon@redhat.com)
+- docs: better documentation for PAPR (jlebon@redhat.com)
+- papr: install libffi-devel (jlebon@redhat.com)
+- pre-install checks: add more during byo install (lmeyer@redhat.com)
+- move etcd backup to etcd_common role (jchaloup@redhat.com)
+- Support installing HOSA via ansible (mwringe@redhat.com)
+- GlusterFS: Remove requirement for heketi-cli (jarrpa@redhat.com)
+- GlusterFS: Fix bugs in wipe (jarrpa@redhat.com)
+- GlusterFS: Skip heketi-cli install on Atomic (jarrpa@redhat.com)
+- GlusterFS: Create a StorageClass if specified (jarrpa@redhat.com)
+- GlusterFS: Use proper secrets (jarrpa@redhat.com)
+- GlusterFS: Allow cleaner separation of multiple clusters (jarrpa@redhat.com)
+- GlusterFS: Minor corrections and cleanups (jarrpa@redhat.com)
+- GlusterFS: Improve documentation (jarrpa@redhat.com)
+- GlusterFS: Allow configuration of kube namespace for heketi
+ (jarrpa@redhat.com)
+- GlusterFS: Adjust when clauses for registry config (jarrpa@redhat.com)
+- GlusterFS: Allow failure reporting when deleting deploy-heketi
+ (jarrpa@redhat.com)
+- GlusterFS: Tweak pod probe parameters (jarrpa@redhat.com)
+- GlusterFS: Allow for configuration of node selector (jarrpa@redhat.com)
+- GlusterFS: Label on Openshift node name (jarrpa@redhat.com)
+- GlusterFS: Make sure timeout is an int (jarrpa@redhat.com)
+- GlusterFS: Use groups variables (jarrpa@redhat.com)
+- papr: rename redhat-ci related files to papr (jlebon@redhat.com)
+- singletonize some role tasks that repeat a lot (lmeyer@redhat.com)
+
+* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.109-1
+-
+
+* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.108-1
+- Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de)
+
+* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.107-1
+- Disable negative caching, set cache TTL to 1s (skuznets@redhat.com)
+- Update mounts in system container installer (smilner@redhat.com)
+- Set ansible retry file location (smilner@redhat.com)
+- installer: add bind mount for /etc/resolv.conf (gscrivan@redhat.com)
+- Making pylint happy (ewolinet@redhat.com)
+- Fix possible access to undefined variable (rhcarvalho@gmail.com)
+- certificates: copy the certificates for the etcd system container
+ (gscrivan@redhat.com)
+- Separate etcd and OpenShift CA redeploy playbooks. (abutcher@redhat.com)
+- lib/base: allow for results parsing on non-zero return code
+ (jarrpa@redhat.com)
+- etcd: system container defines ETCD_(PEER_)?TRUSTED_CA_FILE
+ (gscrivan@redhat.com)
+- etcd: unmask system container service before installing it
+ (gscrivan@redhat.com)
+- etcd: copy previous database when migrating to system container
+ (gscrivan@redhat.com)
+- etcd: define data dir location for the system container (gscrivan@redhat.com)
+- oc_obj: set _delete() rc to 0 if err is 'not found' (jarrpa@redhat.com)
+- oc_obj: only check 'items' if exists in delete (jarrpa@redhat.com)
+- Removed hardocded Calico Policy Controller URL (vincent.schwarzer@yahoo.de)
+- Allowing openshift_metrics to specify PV selectors and allow way to define
+ selectors when creating pv (ewolinet@redhat.com)
+
+* Tue Jun 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.100-1
+- Change default key for gce (hekumar@redhat.com)
+- set etcd working directory for embedded etcd (jchaloup@redhat.com)
+- Add daemon-reload handler to openshift_node and notify when /etc/systemd
+ files have been updated. (abutcher@redhat.com)
+- Use volume.beta.kubernetes.io annotation for storage-classes
+ (per.carlson@vegvesen.no)
+- Correct master-config update during upgrade (rteague@redhat.com)
+
+* Mon Jun 12 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.99-1
+- Replace repoquery with module (jchaloup@redhat.com)
+- Consider previous value of 'changed' when updating (rhcarvalho@gmail.com)
+- Improve code readability (rhcarvalho@gmail.com)
+- Disable excluder only on nodes that are not masters (jchaloup@redhat.com)
+- Added includes to specify openshift version for libvirt cluster create.
+ Otherwise bin/cluster create fails on unknown version for libvirt deployment.
+ (schulthess@puzzle.ch)
+- docker checks: finish and refactor (lmeyer@redhat.com)
+- oc_secret: allow use of force for secret type (jarrpa@redhat.com)
+- add docker storage, docker driver checks (jvallejo@redhat.com)
+- Add dependency and use same storageclass name as upstream
+ (hekumar@redhat.com)
+- Add documentation (hekumar@redhat.com)
+- Install default storageclass in AWS & GCE envs (hekumar@redhat.com)
+
+* Fri Jun 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.98-1
+-
+
+* Fri Jun 09 2017 Scott Dodson <sdodson@redhat.com> 3.6.97-1
+- Updated to using oo_random_word for secret gen (ewolinet@redhat.com)
+- Updating kibana to store session and oauth secrets for reuse, fix oauthclient
+ generation for ops (ewolinet@redhat.com)
+
+* Thu Jun 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.5-1
+- Rename container image to origin-ansible / ose-ansible (pep@redhat.com)
+
+* Thu Jun 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.4-1
+- Guard check for container install based on openshift dictionary key
+ (ayoung@redhat.com)
+- Separate client config removal in uninstall s.t. ansible_ssh_user is removed
+ from with_items. (abutcher@redhat.com)
+- Remove supported/implemented barrier for registry object storage providers.
+ (abutcher@redhat.com)
+- Add node unit file on upgrade (smilner@redhat.com)
+- fix up openshift-ansible for use with 'oc cluster up' (jcantril@redhat.com)
+- specify all logging index mappings for kibana (jcantril@redhat.com)
+- openshift-master: set r_etcd_common_etcd_runtime (gscrivan@redhat.com)
+- rename daemon.json to container-daemon.json (smilner@redhat.com)
+- Updating probe timeout and exposing variable to adjust timeout in image
+ (ewolinet@redhat.com)
+- Do not attempt to override openstack nodename (jdetiber@redhat.com)
+- Update image stream to openshift/origin:2c55ade (skuznets@redhat.com)
+
+* Wed Jun 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.3-1
+- Use local openshift.master.loopback_url when generating initial master
+ loopback kubeconfigs. (abutcher@redhat.com)
+
+* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.2-1
+-
+
+* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.1-1
+- Updating image for registry_console (ewolinet@redhat.com)
+- add elasticseatch, fluentd, kibana check (jvallejo@redhat.com)
+- show correct default value in inventory (mmckinst@redhat.com)
+- Skip service restarts within ca redeployment playbook when expired
+ certificates are detected. (abutcher@redhat.com)
+- Add mtu setting to /etc/sysconfig/docker-network (sdodson@redhat.com)
+- Add daemon_reload parameter to service tasks (tbielawa@redhat.com)
+- mux uses fluentd cert/key to talk to ES (rmeggins@redhat.com)
+- fix curator host, port params; remove curator es volumes
+ (rmeggins@redhat.com)
+- add mux docs; allow to specify mux namespaces (rmeggins@redhat.com)
+- oc_secret: allow for specifying secret type (jarrpa@redhat.com)
+- Revert "Merge pull request #4271 from DG-i/master" (skuznets@redhat.com)
+- verify upgrade targets separately for each group (masters, nodes, etcd)
+ (jchaloup@redhat.com)
+- Updating Kibana-proxy secret key name, fixing deleting secrets, fixed extra
+ ES dc creation (ewolinet@redhat.com)
+- upgrade: Reload systemd before restart (smilner@redhat.com)
+- Skip router/registry cert redeploy when
+ openshift_hosted_manage_{router,registry}=false (abutcher@redhat.com)
+- disable docker excluder before it is updated to remove older excluded
+ packages (jchaloup@redhat.com)
+- Support byo etcd for calico (djosborne10@gmail.com)
+- preflight int tests: fix for package_version changes (lmeyer@redhat.com)
+- Remove unnecessary comment. (rhcarvalho@gmail.com)
+- update aos_version module to support generic pkgs and versions
+ (jvallejo@redhat.com)
+- Add separate variables for control plane nodes (sdodson@redhat.com)
+- Copy Nuage VSD generated user certificates to Openshift master nodes
+ (sneha.deshpande@nokia.com)
+- add existing_ovs_version check (jvallejo@redhat.com)
+- Tolerate failures in the node upgrade playbook (sdodson@redhat.com)
+
* Wed May 31 2017 Scott Dodson <sdodson@redhat.com> 3.6.89.0-1
- AMP 2.0 (sdodson@redhat.com)
- add support for oc_service for labels, externalIPs (rmeggins@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 1c8257162..97d835eae 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -393,10 +393,19 @@
- "{{ directories.results | default([]) }}"
- files
+ - set_fact:
+ client_users: "{{ [ansible_ssh_user, 'root'] | unique }}"
+
+ - name: Remove client kubeconfigs
+ file:
+ path: "~{{ item }}/.kube"
+ state: absent
+ with_items:
+ - "{{ client_users }}"
+
- name: Remove remaining files
file: path={{ item }} state=absent
with_items:
- - "~{{ ansible_ssh_user }}/.kube"
- /etc/ansible/facts.d/openshift.fact
- /etc/atomic-enterprise
- /etc/corosync
@@ -421,7 +430,6 @@
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- - /root/.kube
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/openshift
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index fd4a9eb26..2372a5322 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -15,6 +15,11 @@
checks:
- disk_availability
- memory_availability
+ - package_availability
+ - package_update
+ - package_version
+ - docker_image_availability
+ - docker_storage
- include: ../../common/openshift-cluster/std_include.yml
tags:
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
new file mode 100644
index 000000000..29f821eda
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
@@ -0,0 +1,10 @@
+---
+- include: initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml
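Like the existing byo entry points (cf. the `ansible-playbook -i` invocation in `.papr.sh` earlier in this diff), the new playbook would be run directly against an inventory. A sketch, with an illustrative inventory path:

```sh
# Redeploy the etcd CA and redistribute it to etcd hosts and masters:
ansible-playbook -i /etc/ansible/hosts \
    playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
```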
diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
index 3b33e0d6f..6e11a111b 100644
--- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
@@ -7,4 +7,4 @@
tags:
- always
-- include: ../../common/openshift-cluster/redeploy-certificates/ca.yml
+- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 46932b27f..c28ce4c14 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -155,5 +155,5 @@
groups: oo_glusterfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}"
+ with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
changed_when: no
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 5db71b857..8d94b6509 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -45,6 +45,8 @@
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- name: Update master-config for publicLoggingURL
hosts: oo_masters_to_config:!oo_first_master
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
new file mode 100644
index 000000000..6964e8567
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -0,0 +1,158 @@
+---
+- name: Check cert expirys
+ hosts: oo_etcd_to_config:oo_masters_to_config
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
+
+- name: Backup existing etcd CA certificate directories
+ hosts: oo_etcd_to_config
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Determine if CA certificate directory exists
+ stat:
+ path: "{{ etcd_ca_dir }}"
+ register: etcd_ca_certs_dir_stat
+ - name: Backup generated etcd certificates
+ command: >
+ tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_ca_dir }}
+ args:
+ warn: no
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+ - name: Remove CA certificate directory
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: absent
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+
+- name: Generate new etcd CA
+ hosts: oo_first_etcd
+ roles:
+ - role: openshift_etcd_ca
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_etcd_mktemp
+ changed_when: false
+
+- name: Distribute etcd CA to etcd hosts
+ hosts: oo_etcd_to_config
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Create a tarball of the etcd ca certs
+ command: >
+ tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
+ -C {{ etcd_ca_dir }} .
+ args:
+ creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ warn: no
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Retrieve etcd ca cert tarball
+ fetch:
+ src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Ensure ca directory exists
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: directory
+ - name: Unarchive etcd ca cert tarballs
+ unarchive:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ etcd_ca_dir }}"
+ - name: Read current etcd CA
+ slurp:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ register: g_current_etcd_ca_output
+ - name: Read new etcd CA
+ slurp:
+ src: "{{ etcd_ca_dir }}/ca.crt"
+ register: g_new_etcd_ca_output
+ - copy:
+ content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
+ dest: "{{ item }}/ca.crt"
+ with_items:
+ - "{{ etcd_conf_dir }}"
+ - "{{ etcd_ca_dir }}"
+
+- include: ../../openshift-etcd/restart.yml
+ # Do not restart etcd when etcd certificates were previously expired.
+ when: ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
+
+- name: Retrieve etcd CA certificate
+ hosts: oo_first_etcd
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Retrieve etcd CA certificate
+ fetch:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+
+- name: Distribute etcd CA to masters
+ hosts: oo_masters_to_config
+ vars:
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ tasks:
+ - name: Deploy etcd CA
+ copy:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
+ dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
+ when: groups.oo_etcd_to_config | default([]) | length > 0
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file:
+ name: "{{ g_etcd_mktemp.stdout }}"
+ state: absent
+ changed_when: false
+
+- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
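
Two details in this new playbook are easy to miss. The copy task concatenates the freshly generated CA ahead of the current one, producing a bundle that validates certificates signed by either CA while they are rolled. And the restart gates are built from the repository's oo_select_keys and oo_collect filter plugins; a hedged sketch of the etcd gate split into readable steps (the fact name is illustrative):

    # Assumes the repo's oo_* filters and a prior openshift_certificate_expiry run.
    - set_fact:
        l_etcd_cert_health: "{{ hostvars
                                | oo_select_keys(groups['etcd'])
                                | oo_collect('check_results.check_results.etcd')
                                | oo_collect('health') }}"
    - debug:
        msg: Skipping the etcd restart because a certificate had already expired
      when: "'expired' in l_etcd_cert_health"
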
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 0d0ff798c..089ae6bbc 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -6,134 +6,17 @@
msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
when: not openshift.common.version_gte_3_2_or_1_2 | bool
-- name: Backup existing etcd CA certificate directories
- hosts: oo_etcd_to_config
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- tasks:
- - name: Determine if CA certificate directory exists
- stat:
- path: "{{ etcd_ca_dir }}"
- register: etcd_ca_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_ca_dir }}
- args:
- warn: no
- when: etcd_ca_certs_dir_stat.stat.exists | bool
- - name: Remove CA certificate directory
- file:
- path: "{{ etcd_ca_dir }}"
- state: absent
- when: etcd_ca_certs_dir_stat.stat.exists | bool
-
-- name: Generate new etcd CA
- hosts: oo_first_etcd
- roles:
- - role: openshift_etcd_ca
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_etcd_mktemp
- changed_when: false
-
-- name: Distribute etcd CA to etcd hosts
- hosts: oo_etcd_to_config
+- name: Check cert expiries
+ hosts: oo_nodes_to_config:oo_masters_to_config
vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_certificate_expiry_show_all: yes
roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- tasks:
- - name: Create a tarball of the etcd ca certs
- command: >
- tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
- -C {{ etcd_ca_dir }} .
- args:
- creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- warn: no
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Retrieve etcd ca cert tarball
- fetch:
- src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Ensure ca directory exists
- file:
- path: "{{ etcd_ca_dir }}"
- state: directory
- - name: Unarchive etcd ca cert tarballs
- unarchive:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- - name: Read current etcd CA
- slurp:
- src: "{{ etcd_conf_dir }}/ca.crt"
- register: g_current_etcd_ca_output
- - name: Read new etcd CA
- slurp:
- src: "{{ etcd_ca_dir }}/ca.crt"
- register: g_new_etcd_ca_output
- - copy:
- content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
- dest: "{{ item }}/ca.crt"
- with_items:
- - "{{ etcd_conf_dir }}"
- - "{{ etcd_ca_dir }}"
-
-- name: Retrieve etcd CA certificate
- hosts: oo_first_etcd
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- tasks:
- - name: Retrieve etcd CA certificate
- fetch:
- src: "{{ etcd_conf_dir }}/ca.crt"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
-
-- name: Distribute etcd CA to masters
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Deploy CA certificate, key, bundle and serial
- copy:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
- dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
- when: groups.oo_etcd_to_config | default([]) | length > 0
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file:
- name: "{{ g_etcd_mktemp.stdout }}"
- state: absent
- changed_when: false
-
-- include: ../../openshift-etcd/restart.yml
+ # Sets 'check_results' per host, which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
# Update master config when ca-bundle not referenced. Services will be
# restarted below after new CA certificate has been distributed.
@@ -326,6 +209,16 @@
with_items: "{{ client_users }}"
- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -375,3 +268,13 @@
changed_when: false
- include: ../../openshift-node/restart.yml
+ # Do not restart nodes when node certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
index 7988e97ab..a66301c0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -1,6 +1,6 @@
---
- name: Disable excluders
- hosts: oo_nodes_to_config
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
gather_facts: no
roles:
- role: openshift_excluder
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index b7fd2c0c5..616ba04f8 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -3,12 +3,12 @@
hosts: oo_etcd_hosts_to_backup
roles:
- role: openshift_facts
- - role: etcd_upgrade
- r_etcd_upgrade_action: backup
- r_etcd_backup_tag: etcd_backup_tag
+ - role: etcd_common
+ r_etcd_common_action: backup
+ r_etcd_common_backup_tag: etcd_backup_tag
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_upgrade_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -18,7 +18,7 @@
- set_fact:
etcd_backup_completed: "{{ hostvars
| oo_select_keys(groups.oo_etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'r_etcd_upgrade_backup_complete': true}) }}"
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- fail:
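
The backup logic now lives in etcd_common behind a single action variable, with the former r_etcd_upgrade_* inputs renamed to r_etcd_common_*. A hedged sketch of an equivalent standalone invocation (the tag value is an assumption):

    - hosts: oo_etcd_hosts_to_backup
      roles:
        - role: etcd_common
          r_etcd_common_action: backup
          r_etcd_common_backup_tag: adhoc_        # illustrative tag
          r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
          r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
          r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
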
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index 3e01883ae..64abc54e7 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -16,7 +16,8 @@
tasks:
- include_role:
name: etcd_common
- tasks_from: etcdctl.yml
+ vars:
+ r_etcd_common_action: drop_etcdctl
- name: Perform etcd upgrade
include: ./upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 4cf434dab..d9ddf3860 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -9,7 +9,7 @@
replace ( '${version}', openshift_image_tag ) }}"
router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) |
replace ( '${version}', openshift_image_tag ) }}"
- registry_console_image: "{{ openshift.master.registry_url | replace ( '${component}', 'registry-console') |
+ registry_console_image: "{{ openshift.master.registry_url | regex_replace ( '(origin|ose)-\\${component}', 'registry-console') |
replace ( '${version}', 'v' ~ openshift.common.short_version ) }}"
pre_tasks:
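
regex_replace is needed here because the registry-console image drops the origin-/ose- prefix that the registry URL template embeds before ${component}; a plain replace() of the placeholder would leave that prefix in the image name. A hedged sketch with an example URL:

    # Illustrative input URL; the prefix and placeholder are rewritten together.
    - debug:
        msg: "{{ l_example_url
                 | regex_replace('(origin|ose)-\\${component}', 'registry-console')
                 | replace('${version}', 'v3.6') }}"
      vars:
        l_example_url: 'registry.example.com/openshift3/ose-${component}:${version}'
    # -> registry.example.com/openshift3/registry-console:v3.6
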
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
index 7646e0fa6..9d8b73cff 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -1,23 +1,20 @@
---
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- # Only check if docker upgrade is required if docker_upgrade is not
- # already set to False.
- - include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+# Only check if docker upgrade is required if docker_upgrade is not
+# already set to False.
+- include: ../docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
- # Additional checks for Atomic hosts:
+# Additional checks for Atomic hosts:
- - name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
+- name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
- - set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
+- set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
- - fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 6a9f88707..9b4a8e413 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -1,45 +1,43 @@
---
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
+- name: Fail when OpenShift is not installed
+ fail:
+ msg: Verify OpenShift is already installed
+ when: openshift.common.version is not defined
- tasks:
- - name: Fail when OpenShift is not installed
- fail:
- msg: Verify OpenShift is already installed
- when: openshift.common.version is not defined
-
- - name: Verify containers are available for upgrade
- command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
+- name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
- - when: not openshift.common.is_containerized | bool
- block:
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
+- when: not openshift.common.is_containerized | bool
+ block:
+ - name: Check latest available OpenShift RPM version
+ repoquery:
+ name: "{{ openshift.common.service_type }}"
+ ignore_excluders: true
+ register: repoquery_out
- - name: Fail when unable to determine available OpenShift RPM version
- fail:
- msg: "Unable to determine available OpenShift RPM version"
- when:
- - avail_openshift_version.stdout == ''
+ - name: Fail when unable to determine available OpenShift RPM version
+ fail:
+ msg: "Unable to determine available OpenShift RPM version"
+ when:
+ - not repoquery_out.results.package_found
- - name: Verify OpenShift RPMs are available for upgrade
- fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when:
- - not avail_openshift_version | skipped
- - avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+ - name: Set fact avail_openshift_version
+ set_fact:
+ avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
- - name: Fail when openshift version does not meet minium requirement for Origin upgrade
+ - name: Verify OpenShift RPMs are available for upgrade
fail:
- msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - deployment_type == 'origin'
- - openshift.common.version | version_compare(openshift_upgrade_min,'<')
+ - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<')
+
+- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
+ fail:
+ msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ when:
+ - deployment_type == 'origin'
+ - openshift.common.version | version_compare(openshift_upgrade_min,'<')
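
The repoquery module used above replaces stdout parsing with structured results. Judging only from the fields this play consumes, the returned shape is roughly as sketched below (values are illustrative):

    # Assumed result structure, inferred from the checks above:
    #   repoquery_out.results.package_found               -> boolean
    #   repoquery_out.results.versions.available_versions -> e.g. ['3.6.1', '3.6.0']
    - debug:
        msg: "Latest available: {{ repoquery_out.results.versions.available_versions.0 }}"
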
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index e10c4c540..b980909eb 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -247,8 +247,8 @@
hosts: oo_masters_to_config:&oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
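
serial and max_fail_percentage are play keywords evaluated before per-host inventory vars exist, hence the comment's insistence on -e. A default of 0 roughly preserves the old any_errors_fatal fail-fast behaviour, since any failure in a batch then exceeds the threshold, while still allowing an override. A hedged invocation sketch (values are assumptions):

    #   ansible-playbook <control-plane upgrade playbook> \
    #     -e openshift_upgrade_control_plane_nodes_serial=20% \
    #     -e openshift_upgrade_control_plane_nodes_max_fail_percentage=10
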
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 4d455fe0a..91dbc2cd4 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -4,7 +4,7 @@
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
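
The node play gains the same tunable. With assumed values of serial=4 and max_fail_percentage=25, nodes upgrade four at a time and the play aborts only once more than one node in a batch fails:

    #   ansible-playbook <node upgrade playbook> \
    #     -e openshift_upgrade_nodes_serial=4 \
    #     -e openshift_upgrade_nodes_max_fail_percentage=25
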
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
index d81a13ef2..f1245aa2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -78,11 +78,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index 8a692d02b..b693ab55c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -82,11 +82,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 2d30bba94..4fd029107 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -79,11 +79,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
index e9ff47f32..965e39482 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -78,11 +78,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index d4ae8d8b4..7830f462c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -82,11 +82,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index ae205b172..4364ff8e3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -79,11 +79,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
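
The two trailing tasks leave yaml_value empty, which the repo's modify_yaml module should render as YAML null, clearing admissionConfig.pluginOrderOverride and kubernetesMasterConfig.admissionConfig so that the pluginConfig written by the first task becomes authoritative. A hedged standalone sketch (the path is illustrative):

    # Nulling a nested key with modify_yaml; an empty yaml_value clears it.
    - modify_yaml:
        dest: /tmp/example-master-config.yaml
        yaml_key: admissionConfig.pluginOrderOverride
        yaml_value:
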
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 1269634d1..e63b03e51 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -78,11 +78,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 21c075678..74c2964aa 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -82,11 +82,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
@@ -109,6 +115,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index e67e169fc..036d3fcf5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -79,11 +79,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index a1b1f3301..5d41b84d0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -78,11 +78,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index af6e1f71b..a66fb51ff 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -82,11 +82,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
@@ -109,6 +115,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 285c18b7b..25eceaf90 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -79,11 +79,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 60cf56108..ddc4db8f8 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -117,6 +117,7 @@
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 792ffb4e2..acebabc91 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -32,7 +32,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+ when: hostvars[item].openshift is defined and hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
changed_when: False
- name: Configure containerized nodes
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index f782d6dab..477213f4e 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -3,6 +3,8 @@
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
+- include: ../../common/openshift-cluster/std_include.yml
+
- hosts: localhost
gather_facts: no
tasks:
diff --git a/requirements.txt b/requirements.txt
index 734ee6201..dae460713 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,3 +7,4 @@ pyOpenSSL==16.2.0
# We need to disable ruamel.yaml for now because of test failures
#ruamel.yaml
six==1.10.0
+passlib==1.6.5
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index 03c612982..207dee068 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -1,20 +1,15 @@
---
kubeconfig: "{{openshift.common.config_base}}/node/{{ 'system:node:' + openshift.common.hostname }}.kubeconfig"
-etcd_endpoints: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls | join(',') }}"
cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
-cni_url: "https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz"
+cni_url: "https://github.com/containernetworking/cni/releases/download/v0.5.2/cni-amd64-v0.5.2.tgz"
-calico_etcd_ca_cert_file: "/etc/origin/calico/calico.etcd-ca.crt"
-calico_etcd_cert_file: "/etc/origin/calico/calico.etcd-client.crt"
-calico_etcd_key_file: "/etc/origin/calico/calico.etcd-client.key"
-
-calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico"
-calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam"
+calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico"
+calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico-ipam"
calico_ipv4pool_ipip: "always"
calico_ipv4pool_cidr: "192.168.0.0/16"
calico_log_dir: "/var/log/calico"
-calico_node_image: "calico/node:v1.1.0"
+calico_node_image: "calico/node:v1.2.1"
diff --git a/roles/calico/tasks/gen_certs.yml b/roles/calico/tasks/gen_certs.yml
new file mode 100644
index 000000000..2e6aa114e
--- /dev/null
+++ b/roles/calico/tasks/gen_certs.yml
@@ -0,0 +1,17 @@
+---
+- name: Calico Node | Generate OpenShift-etcd certs
+ include: ../../../roles/etcd_client_certificates/tasks/main.yml
+ vars:
+ etcd_cert_prefix: calico.etcd-
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
+
+- name: Calico Node | Set etcd cert location facts
+ set_fact:
+ calico_etcd_ca_cert_file: "/etc/origin/calico/calico.etcd-ca.crt"
+ calico_etcd_cert_file: "/etc/origin/calico/calico.etcd-client.crt"
+ calico_etcd_key_file: "/etc/origin/calico/calico.etcd-client.key"
+ calico_etcd_endpoints: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls | join(',') }}"
+ calico_etcd_cert_dir: "/etc/origin/calico/"
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index fa5e338b3..8a7a61dc9 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -1,19 +1,36 @@
---
-- include: ../../../roles/etcd_client_certificates/tasks/main.yml
- vars:
- etcd_cert_prefix: calico.etcd-
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
+- name: Calico Node | Error if invalid cert arguments
+ fail:
+ msg: "Must provide all or none for the following etcd params: calico_etcd_cert_dir, calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints"
+ when: (calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined) and not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
-- name: Calico Node | Assure the calico certs have been generated
+- name: Calico Node | Generate certs if not provided
+ include: gen_certs.yml
+ when: item is not defined
+ with_items:
+ - calico_etcd_ca_cert_file
+ - calico_etcd_cert_file
+ - calico_etcd_key_file
+ - calico_etcd_endpoints
+ - calico_etcd_cert_dir
+
+- name: Calico Node | Error if no certs set.
+ fail:
+ msg: "Invalid etcd configuration for calico."
+ when: item is not defined or item == ''
+ with_items:
+ - calico_etcd_ca_cert_file
+ - calico_etcd_cert_file
+ - calico_etcd_key_file
+ - calico_etcd_endpoints
+
+- name: Calico Node | Ensure the calico certs are present
stat:
path: "{{ item }}"
with_items:
- - "{{ calico_etcd_ca_cert_file }}"
- - "{{ calico_etcd_cert_file}}"
- - "{{ calico_etcd_key_file }}"
+ - "{{ calico_etcd_ca_cert_file }}"
+ - "{{ calico_etcd_cert_file}}"
+ - "{{ calico_etcd_key_file }}"
- name: Calico Node | Configure Calico service unit file
template:
diff --git a/roles/calico/templates/10-calico.conf.j2 b/roles/calico/templates/10-calico.conf.j2
index 3c8c6b046..1ec569cff 100644
--- a/roles/calico/templates/10-calico.conf.j2
+++ b/roles/calico/templates/10-calico.conf.j2
@@ -4,7 +4,7 @@
"ipam": {
"type": "calico-ipam"
},
- "etcd_endpoints": "{{ etcd_endpoints }}",
+ "etcd_endpoints": "{{ calico_etcd_endpoints }}",
"etcd_key_file": "{{ calico_etcd_key_file }}",
"etcd_cert_file": "{{ calico_etcd_cert_file }}",
"etcd_ca_cert_file": "{{ calico_etcd_ca_cert_file }}",
diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2
index 719d7ba0d..302c5f34e 100644
--- a/roles/calico/templates/calico.service.j2
+++ b/roles/calico/templates/calico.service.j2
@@ -13,8 +13,8 @@ ExecStart=/usr/bin/docker run --net=host --privileged \
-e CALICO_IPV4POOL_IPIP={{ calico_ipv4pool_ipip }} \
-e CALICO_IPV4POOL_CIDR={{ calico_ipv4pool_cidr }} \
-e FELIX_IPV6SUPPORT=false \
- -e ETCD_ENDPOINTS={{ etcd_endpoints }} \
- -v /etc/origin/calico:/etc/origin/calico \
+ -e ETCD_ENDPOINTS={{ calico_etcd_endpoints }} \
+ -v {{ calico_etcd_cert_dir }}:{{ calico_etcd_cert_dir }} \
-e ETCD_CA_CERT_FILE={{ calico_etcd_ca_cert_file }} \
-e ETCD_CERT_FILE={{ calico_etcd_cert_file }} \
-e ETCD_KEY_FILE={{ calico_etcd_key_file }} \
diff --git a/roles/calico/templates/calicoctl.cfg.j2 b/roles/calico/templates/calicoctl.cfg.j2
index 722385ed8..a00ea27dc 100644
--- a/roles/calico/templates/calicoctl.cfg.j2
+++ b/roles/calico/templates/calicoctl.cfg.j2
@@ -3,7 +3,7 @@ kind: calicoApiConfig
metadata:
spec:
datastoreType: "etcdv2"
- etcdEndpoints: "{{ etcd_endpoints }}"
+ etcdEndpoints: "{{ calico_etcd_endpoints }}"
etcdKeyFile: "{{ calico_etcd_key_file }}"
etcdCertFile: "{{ calico_etcd_cert_file }}"
etcdCaCertFile: "{{ calico_etcd_ca_cert_file }}"
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index 5b324bce5..b2df0105f 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -4,3 +4,4 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf
calicoctl_bin_dir: "/usr/local/bin/"
calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl"
+calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.5.4"
diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2
index 3fb1abf0d..811884473 100644
--- a/roles/calico_master/templates/calico-policy-controller.yml.j2
+++ b/roles/calico_master/templates/calico-policy-controller.yml.j2
@@ -74,11 +74,11 @@ spec:
serviceAccountName: calico
containers:
- name: calico-policy-controller
- image: quay.io/calico/kube-policy-controller:v0.5.4
+ image: {{ calico_url_policy_controller }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
- value: {{ etcd_endpoints }}
+ value: {{ calico_etcd_endpoints }}
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
value: {{ calico_etcd_ca_cert_file }}
@@ -96,10 +96,10 @@ spec:
volumeMounts:
# Mount in the etcd TLS secrets.
- name: certs
- mountPath: /etc/origin/calico
+ mountPath: {{ calico_etcd_cert_dir }}
volumes:
# Mount in the etcd TLS secrets.
- name: certs
hostPath:
- path: /etc/origin/calico
+ path: {{ calico_etcd_cert_dir }}
diff --git a/roles/docker/README.md b/roles/docker/README.md
index 4a9f21f22..19908c036 100644
--- a/roles/docker/README.md
+++ b/roles/docker/README.md
@@ -3,7 +3,7 @@ Docker
Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
Requirements
------------
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index e101730d2..c82d8659a 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -102,6 +102,21 @@
notify:
- restart docker
+- stat: path=/etc/sysconfig/docker-network
+ register: sysconfig_docker_network_check
+
+- name: Configure Docker Network OPTIONS
+ lineinfile:
+ dest: /etc/sysconfig/docker-network
+ regexp: '^DOCKER_NETWORK_OPTIONS=.*$'
+ line: "DOCKER_NETWORK_OPTIONS='\
+ {% if openshift.node is defined and openshift.node.sdn_mtu is defined %} --mtu={{ openshift.node.sdn_mtu }}{% endif %}'"
+ when:
+ - sysconfig_docker_network_check.stat.isreg is defined
+ - sysconfig_docker_network_check.stat.isreg
+ notify:
+ - restart docker
+
- name: Start the Docker service
systemd:
name: docker
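
These tasks only touch /etc/sysconfig/docker-network when it already exists as a regular file, and the inline Jinja block emits an --mtu flag only when the node fact is set. Assuming openshift.node.sdn_mtu is 1450, the rendered line would be:

    # Assumed renderings of the lineinfile above (MTU value is illustrative):
    #   sdn_mtu == 1450 -> DOCKER_NETWORK_OPTIONS=' --mtu=1450'
    #   sdn_mtu unset   -> DOCKER_NETWORK_OPTIONS=''
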
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index f0f5a40dd..650f06f86 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -130,8 +130,8 @@
dest: "{{ container_engine_systemd_dir }}/custom.conf"
src: systemcontainercustom.conf.j2
-# Set local versions of facts that must be in json format for daemon.json
-# NOTE: When jinja2.9+ is used the daemon.json file can move to using tojson
+# Set local versions of facts that must be in json format for container-daemon.json
+# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
- set_fact:
l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
@@ -139,10 +139,12 @@
l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
-# Configure container-engine using the daemon.json file
+# Configure container-engine using the container-daemon.json file
+# NOTE: daemon.json and container-daemon.json have been separated to avoid
+# collision.
- name: Configure Container Engine
template:
- dest: "{{ docker_conf_dir }}/daemon.json"
+ dest: "{{ docker_conf_dir }}/container-daemon.json"
src: daemon.json
# Enable and start the container-engine service
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index fa2f44609..586aebb11 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -122,7 +122,8 @@
- include_role:
name: etcd_common
- tasks_from: etcdctl.yml
+ vars:
+ r_etcd_common_action: drop_etcdctl
when: openshift_etcd_etcdctl_profile | default(true) | bool
- name: Set fact etcd_service_status_changed
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 72ffadbd2..f1d948d16 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -15,6 +15,56 @@
{%- endif -%}
{% endfor -%}
+- name: Check etcd system container package
+ command: >
+ atomic containers list --no-trunc -a -f container=etcd -f backend=ostree
+ register: etcd_result
+
+- name: Unmask etcd service
+ systemd:
+ name: etcd
+ state: stopped
+ enabled: yes
+ masked: no
+ daemon_reload: yes
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ when: "'etcd' in etcd_result.stdout"
+
+- name: Disable etcd_container
+ systemd:
+ name: etcd_container
+ state: stopped
+ enabled: no
+ masked: yes
+ daemon_reload: yes
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+
+- name: Check for previous etcd data store
+ stat:
+ path: "{{ etcd_data_dir }}/member/"
+ register: src_datastore
+
+- name: Check for etcd system container data store
+ stat:
+ path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member"
+ register: dest_datastore
+
+- name: Ensure that etcd system container data dirs exist
+ file: path="{{ item }}" state=directory
+ with_items:
+ - "{{ r_etcd_common_system_container_host_dir }}/etc"
+ - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd"
+
+- name: Copy etcd data store
+ command: >
+ cp -a {{ etcd_data_dir }}/member
+ {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member
+ when:
+ - src_datastore.stat.exists
+ - not dest_datastore.stat.exists
+
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
@@ -35,3 +85,5 @@
- ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
+ - ETCD_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index e1a080b34..8cc7a9c20 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,9 +1,21 @@
---
+# Default action when calling this role
+r_etcd_common_action: noop
+r_etcd_common_backup_tag: ''
+r_etcd_common_backup_sufix_name: ''
+
# runc, docker, host
r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_embedded_etcd: false
+
+# etcd run on a host => use etcdctl command directly
+# etcd run as a docker container => use docker exec
+# etcd run as a runc container => use runc exec
+r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
# etcd server vars
-etcd_conf_dir: "{{ '/etc/etcd' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/etc' }}"
+etcd_conf_dir: '/etc/etcd'
+r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
etcd_system_container_conf_dir: /var/lib/etcd/etc
etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
@@ -40,7 +52,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: /var/lib/etcd/
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
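
The new r_etcd_common_etcdctl_command default centralizes the runtime-specific etcdctl invocation formerly defined in etcd_upgrade, and etcd_data_dir now switches to the embedded layout automatically. Summarizing the ternaries above:

    # r_etcd_common_etcdctl_command resolves per runtime:
    #   host, or embedded etcd -> etcdctl
    #   docker                 -> docker exec etcd_container etcdctl
    #   runc                   -> runc exec etcd etcdctl
    # etcd_data_dir:
    #   embedded etcd -> /var/lib/origin/openshift.local.etcd
    #   otherwise     -> /var/lib/etcd/
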
diff --git a/roles/etcd_upgrade/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 1ea6fc59f..4a4832275 100644
--- a/roles/etcd_upgrade/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -1,15 +1,11 @@
---
-# INPUT r_etcd_backup_sufix_name
-# INPUT r_etcd_backup_tag
-# OUTPUT r_etcd_upgrade_backup_complete
- set_fact:
- # ORIGIN etcd_data_dir etcd_common.defaults
- l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_backup_tag | default('') }}{{ r_etcd_backup_sufix_name }}"
+ l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
- register: avail_disk
+ register: l_avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -17,8 +13,8 @@
# TODO: replace shell module with command and update later checks
- name: Check current etcd disk usage
shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: r_etcd_upgrade_embedded_etcd | bool
+ register: l_etcd_disk_usage
+ when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -26,9 +22,9 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (r_etcd_upgrade_embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+ {{ l_etcd_disk_usage.stdout }} KB disk space required for etcd backup,
+ {{ l_avail_disk.stdout }} KB available.
+ when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
@@ -37,17 +33,22 @@
#
# For embedded non containerized we need to ensure we have the latest version
# etcd on the host.
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
- name: Install latest etcd for embedded
package:
name: etcd
state: latest
when:
- - r_etcd_upgrade_embedded_etcd | bool
+ - r_etcd_common_embedded_etcd | bool
- not l_ostree_booted.stat.exists | bool
- name: Generate etcd backup
command: >
- {{ etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
+ {{ r_etcd_common_etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
--backup-dir={{ l_etcd_backup_dir }}
# According to the docs change you can simply copy snap/db
@@ -55,16 +56,16 @@
- name: Check for v3 data store
stat:
path: "{{ etcd_data_dir }}/member/snap/db"
- register: v3_db
+ register: l_v3_db
- name: Copy etcd v3 data store
command: >
cp -a {{ etcd_data_dir }}/member/snap/db
{{ l_etcd_backup_dir }}/member/snap/
- when: v3_db.stat.exists
+ when: l_v3_db.stat.exists
- set_fact:
- r_etcd_upgrade_backup_complete: True
+ r_etcd_common_backup_complete: True
- name: Display location of etcd backup
debug:
diff --git a/roles/etcd_common/tasks/etcdctl.yml b/roles/etcd_common/tasks/drop_etcdctl.yml
index 6cb456677..6cb456677 100644
--- a/roles/etcd_common/tasks/etcdctl.yml
+++ b/roles/etcd_common/tasks/drop_etcdctl.yml
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
new file mode 100644
index 000000000..6ed87e6c7
--- /dev/null
+++ b/roles/etcd_common/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: Fail if invalid r_etcd_common_action provided
+ fail:
+ msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'"
+ when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl']
+
+- name: Include main action task file
+ include: "{{ r_etcd_common_action }}.yml"
+ when: r_etcd_common_action != "noop"
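
This main.yml turns etcd_common into a small dispatcher: it validates r_etcd_common_action, then includes the matching "<action>.yml" task file, with noop loading only the role defaults. A caller sketch mirroring the include_role usage elsewhere in this change:

    - include_role:
        name: etcd_common
      vars:
        r_etcd_common_action: drop_etcdctl   # or 'backup'; 'noop' loads defaults only
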
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index 3ac7f3401..4795188a6 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -5,11 +5,14 @@
- name: Check status of etcd certificates
stat:
- path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ path: "{{ item }}"
with_items:
- - "{{ etcd_cert_prefix }}server.crt"
- - "{{ etcd_cert_prefix }}peer.crt"
- - "{{ etcd_cert_prefix }}ca.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
register: g_etcd_server_cert_stat_result
when: not etcd_certificates_redeploy | default(false) | bool
@@ -132,8 +135,11 @@
- name: Ensure certificate directory exists
file:
- path: "{{ etcd_cert_config_dir }}"
+ path: "{{ item }}"
state: directory
+ with_items:
+ - "{{ etcd_cert_config_dir }}"
+ - "{{ etcd_system_container_cert_config_dir }}"
when: etcd_server_certs_missing | bool
- name: Unarchive cert tarball
@@ -164,15 +170,28 @@
- name: Ensure ca directory exists
file:
- path: "{{ etcd_ca_dir }}"
+ path: "{{ item }}"
state: directory
+ with_items:
+ - "{{ etcd_ca_dir }}"
+ - "{{ etcd_system_container_cert_config_dir }}/ca"
when: etcd_server_certs_missing | bool
-- name: Unarchive etcd ca cert tarballs
+- name: Unarchive cert tarball for the system container
+ unarchive:
+ src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_system_container_cert_config_dir }}"
+ when:
+ - etcd_server_certs_missing | bool
+ - r_etcd_common_etcd_runtime == 'runc'
+
+- name: Unarchive etcd ca cert tarballs for the system container
unarchive:
src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- when: etcd_server_certs_missing | bool
+ dest: "{{ etcd_system_container_cert_config_dir }}/ca"
+ when:
+ - etcd_server_certs_missing | bool
+ - r_etcd_common_etcd_runtime == 'runc'
- name: Delete temporary directory
local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
index 01ad8a268..61bbba225 100644
--- a/roles/etcd_upgrade/defaults/main.yml
+++ b/roles/etcd_upgrade/defaults/main.yml
@@ -1,9 +1,3 @@
---
r_etcd_upgrade_action: upgrade
r_etcd_upgrade_mechanism: rpm
-r_etcd_upgrade_embedded_etcd: False
-
-# etcd run on a host => use etcdctl command directly
-# etcd run as a docker container => use docker exec
-# etcd run as a runc container => use runc exec
-etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_upgrade_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml
index 018bdc8d7..afdb0267f 100644
--- a/roles/etcd_upgrade/meta/main.yml
+++ b/roles/etcd_upgrade/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: etcd_common
+ r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}"
diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
index 5178c14e3..129c69d6b 100644
--- a/roles/etcd_upgrade/tasks/main.yml
+++ b/roles/etcd_upgrade/tasks/main.yml
@@ -2,9 +2,9 @@
# INPUT r_etcd_upgrade_action
- name: Fail if invalid etcd_upgrade_action provided
fail:
- msg: "etcd_upgrade role can only be called with 'upgrade' or 'backup'"
+ msg: "etcd_upgrade role can only be called with 'upgrade'"
when:
- - r_etcd_upgrade_action not in ['upgrade', 'backup']
+ - r_etcd_upgrade_action not in ['upgrade']
- name: Detecting Atomic Host Operating System
stat:
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 3974cc4dd..1b73bfd0e 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1097,10 +1097,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1110,34 +1106,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
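The hunk above is the first instance of a rework of OpenShiftCLI.openshift_cmd() output handling that repeats verbatim across the generated lib_openshift modules below (and in src/lib/base.py, from which they are generated). A minimal sketch of the new control flow as a standalone function, assuming the command has already run (summarize_cmd_result is an illustrative name, not part of the patch):

    import json

    def summarize_cmd_result(returncode, stdout, stderr, cmds,
                             output=False, output_type='json'):
        # 'results' now always carries a type-appropriate default, and
        # stderr/stdout are attached whenever a JSON parse error was
        # recorded or the command itself failed.
        rval = {'returncode': returncode, 'cmd': ' '.join(cmds)}
        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    # Message raised by the Python 2 json module.
                    if 'No JSON object could be decoded' in verr.args:
                        rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''
        if 'err' in rval or returncode != 0:
            rval.update({'stderr': stderr, 'stdout': stdout})
        return rval

Compared to the old branches, an empty or unparsable stdout no longer leaves 'results' unset, and a failing command no longer clobbers parsed results with an empty dict.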
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 320eac17e..b09321a5b 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1083,10 +1083,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1096,34 +1092,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index f9658d6e1..221ef5094 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1069,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1082,34 +1078,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 0bdfd0bad..071562875 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1069,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1082,34 +1078,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index df0e40d20..bf2650460 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1187,10 +1187,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1200,34 +1196,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 8af8cb196..a2b7d12c0 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1212,10 +1212,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1225,34 +1221,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 3ed0d65dc..289f08b83 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1061,10 +1061,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1074,34 +1070,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 5c8ed48d2..7cd29215f 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1067,10 +1067,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1080,34 +1076,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index f3b6d552d..5b11f45ba 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1111,10 +1111,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1124,34 +1120,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index c6421128a..d3834ce0c 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1078,10 +1078,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1091,34 +1087,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index a791c29af..0d751fe28 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1051,10 +1051,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1064,34 +1060,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index bbc123ce0..3a6ba3e56 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1070,10 +1070,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1083,34 +1079,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index cd1afd0d2..5db036b23 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1087,10 +1087,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1100,34 +1096,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 215723cc8..56af303cc 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1090,10 +1090,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1103,34 +1099,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
@@ -1473,7 +1461,12 @@ class OCObject(OpenShiftCLI):
def delete(self):
'''delete the object'''
- return self._delete(self.kind, name=self.name, selector=self.selector)
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -1557,7 +1550,8 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
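The delete() change in this file makes 'state: absent' idempotent: a failed delete whose stderr reports the object as already missing is rewritten as success, and the absent-state check above tolerates list results that lack an 'items' key. A minimal sketch of the not-found tolerance (tolerate_not_found is an illustrative name, not part of the patch):

    def tolerate_not_found(results, name):
        # An object that is already gone counts as a successful delete,
        # so repeated 'state: absent' runs converge instead of failing.
        if (results['returncode'] != 0 and 'stderr' in results and
                '"{}" not found'.format(name) in results['stderr']):
            results['returncode'] = 0
        return results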
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 358ef5130..130521761 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1022,10 +1022,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1035,34 +1031,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 025b846c6..c6568d520 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1079,10 +1079,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1092,34 +1088,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 05dfddab8..a78bc06d2 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1076,10 +1076,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1089,34 +1085,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index d7de4964c..a88639bfc 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1071,10 +1071,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1084,34 +1080,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 3090b4cad..0c0bc9386 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1121,10 +1121,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1134,34 +1130,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 6a505fb6b..f112b6dd0 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1065,10 +1065,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1078,34 +1074,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 02257500f..d762e0c38 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1117,10 +1117,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1130,34 +1126,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
@@ -1613,7 +1601,7 @@ class OCSecret(OpenShiftCLI):
'''delete a secret by name'''
return self._delete('secrets', self.name)
- def create(self, files=None, contents=None):
+ def create(self, files=None, contents=None, force=False):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
@@ -1622,6 +1610,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['secrets', 'new', self.name]
if self.type is not None:
cmd.append("--type=%s" % (self.type))
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -1634,7 +1624,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files)
+ secret = self.prep_secret(files, force)
if secret['returncode'] != 0:
return secret
@@ -1646,7 +1636,7 @@ class OCSecret(OpenShiftCLI):
return self._replace(sfile_path, force=force)
- def prep_secret(self, files=None, contents=None):
+ def prep_secret(self, files=None, contents=None, force=False):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
@@ -1657,6 +1647,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['-ojson', 'secrets', 'new', self.name]
if self.type is not None:
cmd.extend(["--type=%s" % (self.type)])
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -1719,7 +1711,7 @@ class OCSecret(OpenShiftCLI):
return {'changed': True,
'msg': 'Would have performed a create.'}
- api_rval = ocsecret.create(files, params['contents'])
+ api_rval = ocsecret.create(files, params['contents'], force=params['force'])
# Remove files
if files and params['delete_after']:
@@ -1736,7 +1728,7 @@ class OCSecret(OpenShiftCLI):
########
# Update
########
- secret = ocsecret.prep_secret(params['files'], params['contents'])
+ secret = ocsecret.prep_secret(params['files'], params['contents'], force=params['force'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
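create() and prep_secret() now thread a 'force' flag through to 'oc secrets new' as the '--confirm' option, driven by the module's 'force' parameter. A minimal sketch of the command assembly (build_secret_cmd is an illustrative helper, not part of the patch):

    def build_secret_cmd(name, secret_type=None, force=False, sources=()):
        # Mirror OCSecret.create(): the module's 'force' parameter is
        # forwarded to 'oc secrets new' as '--confirm'.
        cmd = ['secrets', 'new', name]
        if secret_type is not None:
            cmd.append('--type=%s' % secret_type)
        if force:
            cmd.append('--confirm')
        cmd.extend(sources)
        return cmd

For example, build_secret_cmd('mysecret', force=True, sources=['key=/tmp/key']) yields ['secrets', 'new', 'mysecret', '--confirm', 'key=/tmp/key'].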
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 308f45488..769b75e15 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1124,10 +1124,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1137,34 +1133,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 68c1fc51c..446987eff 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1063,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1076,34 +1072,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index ebc5bf8a2..c7eb1986a 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1063,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1076,34 +1072,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index d1a20fddc..3a98693b7 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1123,10 +1123,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1136,34 +1132,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 548c9d8e0..939261526 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1035,10 +1035,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1048,34 +1044,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 3826cd8e5..41e7d0ab8 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1112,10 +1112,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1125,34 +1121,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 6f0da3d5c..5e423bea9 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -33,7 +33,12 @@ class OCObject(OpenShiftCLI):
def delete(self):
'''delete the object'''
- return self._delete(self.kind, name=self.name, selector=self.selector)
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -117,7 +122,8 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index ee83580df..4ee6443e9 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -44,7 +44,7 @@ class OCSecret(OpenShiftCLI):
'''delete a secret by name'''
return self._delete('secrets', self.name)
- def create(self, files=None, contents=None):
+ def create(self, files=None, contents=None, force=False):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
@@ -53,6 +53,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['secrets', 'new', self.name]
if self.type is not None:
cmd.append("--type=%s" % (self.type))
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -65,7 +67,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files)
+ secret = self.prep_secret(files, force)
if secret['returncode'] != 0:
return secret
@@ -77,7 +79,7 @@ class OCSecret(OpenShiftCLI):
return self._replace(sfile_path, force=force)
- def prep_secret(self, files=None, contents=None):
+ def prep_secret(self, files=None, contents=None, force=False):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
@@ -88,6 +90,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['-ojson', 'secrets', 'new', self.name]
if self.type is not None:
cmd.extend(["--type=%s" % (self.type)])
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -150,7 +154,7 @@ class OCSecret(OpenShiftCLI):
return {'changed': True,
'msg': 'Would have performed a create.'}
- api_rval = ocsecret.create(files, params['contents'])
+ api_rval = ocsecret.create(files, params['contents'], force=params['force'])
# Remove files
if files and params['delete_after']:
@@ -167,7 +171,7 @@ class OCSecret(OpenShiftCLI):
########
# Update
########
- secret = ocsecret.prep_secret(params['files'], params['contents'])
+ secret = ocsecret.prep_secret(params['files'], params['contents'], force=params['force'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index b3f01008b..16770b22d 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -273,10 +273,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -286,34 +282,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py
index 09cc4a374..323b3423c 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py
@@ -48,6 +48,7 @@ class OCSecretTest(unittest.TestCase):
'debug': False,
'files': None,
'delete_after': True,
+ 'force': False,
}
# Return values of our mocked function call. These get returned once per call.
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index c7b906949..b9a7ec32f 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -108,6 +108,38 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
+- name: Test local loopback context
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
+ --config={{ openshift_master_loopback_config }}
+ changed_when: false
+ register: loopback_config
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Generate the loopback master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --certificate-authority={{ openshift_ca_cert }}
+ --client-dir={{ openshift_ca_config_dir }}
+ --groups=system:masters,system:openshift-master
+ --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ {% endif %}
+ when: loopback_context_string not in loopback_config.stdout
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
- name: Restore original serviceaccount keys
copy:
src: "{{ item }}.keep"
diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml
index a32e385ec..d04c1766d 100644
--- a/roles/openshift_ca/vars/main.yml
+++ b/roles/openshift_ca/vars/main.yml
@@ -4,3 +4,6 @@ openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
openshift_version: "{{ openshift_pkg_version | default('') }}"
+
+openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
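Together, the new task and vars implement a conditional regeneration: render the existing loopback kubeconfig and only re-run `create-api-client-config` when its current-context line is missing. A minimal Python sketch of the equivalent guard, assuming `oc` is on the PATH and the same `current-context:` convention (names here are illustrative):

```python
import subprocess

def loopback_config_needs_regen(kubeconfig, context_name, oc_binary='oc'):
    """Return True when 'current-context: <name>' is absent from the rendered
    client config, mirroring the task's `when:` condition."""
    out = subprocess.check_output(
        [oc_binary, 'config', 'view', '--config=%s' % kubeconfig])
    loopback_context_string = 'current-context: %s' % context_name
    return loopback_context_string not in out.decode('utf-8')
```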
diff --git a/roles/openshift_default_storage_class/README.md b/roles/openshift_default_storage_class/README.md
new file mode 100644
index 000000000..198163127
--- /dev/null
+++ b/roles/openshift_default_storage_class/README.md
@@ -0,0 +1,39 @@
+openshift_default_storage_class
+=========
+
+A role that deploys configurations for the OpenShift StorageClass.
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+openshift_storageclass_name: Name of the storage class to create
+openshift_storageclass_provisioner: The Kubernetes provisioner to use
+openshift_storageclass_type: Type of storage to use; this differs among clouds/providers
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+- role: openshift_default_storage_class
+ openshift_storageclass_name: awsEBS
+ openshift_storageclass_provisioner: kubernetes.io/aws-ebs
+ openshift_storageclass_type: gp2
+
+
+License
+-------
+
+Apache
+
+Author Information
+------------------
+
+OpenShift Operations
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
new file mode 100644
index 000000000..66ffd2a73
--- /dev/null
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+openshift_storageclass_defaults:
+ aws:
+ name: gp2
+ provisioner: kubernetes.io/aws-ebs
+ type: gp2
+ gce:
+ name: standard
+ provisioner: kubernetes.io/gce-pd
+ type: pd-standard
+
+openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
+openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
+openshift_storageclass_type: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['type'] }}"
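The defaults file is a per-cloud lookup table keyed by `openshift_cloudprovider_kind`. The same resolution can be sketched in Python (values copied from the YAML above; the override mechanism is an assumption mirroring normal Ansible variable precedence):

```python
STORAGECLASS_DEFAULTS = {
    'aws': {'name': 'gp2', 'provisioner': 'kubernetes.io/aws-ebs', 'type': 'gp2'},
    'gce': {'name': 'standard', 'provisioner': 'kubernetes.io/gce-pd', 'type': 'pd-standard'},
}

def resolve_storageclass(cloudprovider_kind, overrides=None):
    """Resolve the storage class settings: explicit overrides win, otherwise
    the per-cloud defaults for the given provider kind are used."""
    resolved = dict(STORAGECLASS_DEFAULTS[cloudprovider_kind])
    resolved.update(overrides or {})
    return resolved

# resolve_storageclass('aws')
# -> {'name': 'gp2', 'provisioner': 'kubernetes.io/aws-ebs', 'type': 'gp2'}
```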
diff --git a/roles/openshift_default_storage_class/meta/main.yml b/roles/openshift_default_storage_class/meta/main.yml
new file mode 100644
index 000000000..d7d57fe39
--- /dev/null
+++ b/roles/openshift_default_storage_class/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Operations
+ description: This role configures the StorageClass in OpenShift
+ company: Red Hat
+ license: Apache
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml
new file mode 100644
index 000000000..408fc17c7
--- /dev/null
+++ b/roles/openshift_default_storage_class/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+# Install default storage classes in GCE & AWS
+- name: Ensure storageclass object
+ oc_obj:
+ kind: storageclass
+ name: "{{ openshift_storageclass_name }}"
+ content:
+ path: /tmp/openshift_storageclass
+ data:
+ kind: StorageClass
+ apiVersion: storage.k8s.io/v1beta1
+ metadata:
+ name: "{{ openshift_storageclass_name }}"
+ annotations:
+ storageclass.beta.kubernetes.io/is-default-class: "true"
+ provisioner: "{{ openshift_storageclass_provisioner }}"
+ parameters:
+ type: "{{ openshift_storageclass_type }}"
+ run_once: true
diff --git a/roles/openshift_default_storage_class/vars/main.yml b/roles/openshift_default_storage_class/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_default_storage_class/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 350512452..95e94171d 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -17,6 +17,9 @@
hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"
hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
use_system_container: "{{ openshift_docker_use_system_container | default(False) }}"
+ - role: node
+ local_facts:
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- set_fact:
docker_additional_registries: "{{ openshift.docker.additional_registries
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index 82db36eba..b3ecd57a6 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -5,6 +5,7 @@ etcd_hostname: "{{ openshift.common.hostname }}"
etcd_ip: "{{ openshift.common.ip }}"
etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
etcd_cert_prefix:
-etcd_cert_config_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container | bool else '/var/lib/etcd/etcd.etcd/etc' }}"
+etcd_cert_config_dir: "/etc/etcd"
+etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc
etcd_peer_url_scheme: https
etcd_url_scheme: https
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
index a81dbb654..2583018b7 100644
--- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
@@ -103,7 +103,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +137,22 @@
"kind": "DockerImage",
"name": "centos/nodejs-4-centos7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/6/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/nodejs-6-centos7:latest"
+ }
}
]
}
@@ -407,7 +423,7 @@
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"jee,java",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "ImageStreamTag",
@@ -423,7 +439,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:8.1,jee,java",
"version": "8.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -439,7 +455,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:9.0,jee,java",
"version": "9.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -455,7 +471,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.0,jee,java",
"version": "10.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -471,7 +487,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.1,jee,java",
"version": "10.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
index 2ed0efe1e..b65f0a5e3 100644
--- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
@@ -103,7 +103,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +137,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/nodejs-4-rhel7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/nodejs-6-rhel7:latest"
+ }
}
]
}
@@ -253,7 +269,7 @@
"tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
- "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
"kind": "DockerImage",
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index d09358bee..3a866cedf 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,14 +1,24 @@
---
-- name: Install docker excluder
- package:
- name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ r_openshift_excluder_docker_package_state }}"
- when:
- - r_openshift_excluder_enable_docker_excluder | bool
-
-- name: Install openshift excluder
- package:
- name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ r_openshift_excluder_package_state }}"
- when:
- - r_openshift_excluder_enable_openshift_excluder | bool
+
+- when:
+ - not openshift.common.is_atomic | bool
+ - r_openshift_excluder_install_ran is not defined
+
+ block:
+
+ - name: Install docker excluder
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_docker_package_state }}"
+ when:
+ - r_openshift_excluder_enable_docker_excluder | bool
+
+ - name: Install openshift excluder
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_package_state }}"
+ when:
+ - r_openshift_excluder_enable_openshift_excluder | bool
+
+ - set_fact:
+ r_openshift_excluder_install_ran: True
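The block-plus-`set_fact` pattern makes the install tasks effectively run-once per host within a play. The same idempotence guard, sketched in Python for illustration (the flag name mirrors `r_openshift_excluder_install_ran`; the install action is a stand-in):

```python
_install_ran = False  # analogue of the r_openshift_excluder_install_ran fact

def install_excluders(install_fn):
    """Run install_fn only on the first invocation, mirroring the
    `when: r_openshift_excluder_install_ran is not defined` guard."""
    global _install_ran
    if _install_ran:
        return
    install_fn()
    _install_ran = True
```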
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 514c06500..cfe092a28 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -193,8 +193,7 @@ def hostname_valid(hostname):
"""
if (not hostname or
hostname.startswith('localhost') or
- hostname.endswith('localdomain') or
- hostname.endswith('novalocal')):
+ hostname.endswith('localdomain')):
return False
return True
@@ -1041,10 +1040,13 @@ def set_sdn_facts_if_unset(facts, system_facts):
def set_nodename(facts):
""" set nodename """
if 'node' in facts and 'common' in facts:
- if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
- facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
- elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
+ if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
+
+ # TODO: The openstack cloudprovider nodename setting was too opinionated.
+ # It needs to be generalized before it can be enabled again.
+ # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
+ # facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
else:
facts['node']['nodename'] = facts['common']['hostname'].lower()
return facts
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 1b9bda67e..50ed3e964 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -24,12 +24,18 @@
msg: |
openshift-ansible requires Python 3 for {{ ansible_distribution }};
For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
- when: ansible_distribution == 'Fedora' and ansible_python['version']['major'] != 3
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_python['version']['major'] != 3
+ - r_openshift_facts_ran is not defined
- name: Validate python version
fail:
msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
- when: ansible_distribution != 'Fedora' and ansible_python['version']['major'] != 2
+ when:
+ - ansible_distribution != 'Fedora'
+ - ansible_python['version']['major'] != 2
+ - r_openshift_facts_ran is not defined
# Fail as early as possible if Atomic and old version of Docker
- block:
@@ -48,7 +54,9 @@
that:
- l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
- when: l_is_atomic | bool
+ when:
+ - l_is_atomic | bool
+ - r_openshift_facts_ran is not defined
- name: Load variables
include_vars: "{{ item }}"
@@ -59,7 +67,9 @@
- name: Ensure various deps are installed
package: name={{ item }} state=present
with_items: "{{ required_packages }}"
- when: not l_is_atomic | bool
+ when:
+ - not l_is_atomic | bool
+ - r_openshift_facts_ran is not defined
- name: Ensure various deps for running system containers are installed
package: name={{ item }} state=present
@@ -67,6 +77,7 @@
when:
- not l_is_atomic | bool
- l_any_system_container | bool
+ - r_openshift_facts_ran is not defined
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
@@ -99,3 +110,7 @@
- name: Set repoquery command
set_fact:
repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+- name: Register that this already ran
+ set_fact:
+ r_openshift_facts_ran: True
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index 4460ec324..4c205e48c 100755
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -16,8 +16,6 @@ of release availability already. Without duplicating all that, we would
like the user to have a helpful error message if we detect things will
not work out right. Note that if openshift_release is not specified in
the inventory, the version comparison checks just pass.
-
-TODO: fail gracefully on non-yum systems (dnf in Fedora)
'''
from ansible.module_utils.basic import AnsibleModule
@@ -26,7 +24,7 @@ IMPORT_EXCEPTION = None
try:
import yum # pylint: disable=import-error
except ImportError as err:
- IMPORT_EXCEPTION = err # in tox test env, yum import fails
+ IMPORT_EXCEPTION = err
class AosVersionException(Exception):
@@ -37,12 +35,10 @@ class AosVersionException(Exception):
def main():
- '''Entrypoint for this Ansible module'''
+ """Entrypoint for this Ansible module"""
module = AnsibleModule(
argument_spec=dict(
- requested_openshift_release=dict(type="str", default=''),
- openshift_deployment_type=dict(required=True),
- rpm_prefix=dict(required=True), # atomic-openshift, origin, ...?
+ package_list=dict(type="list", required=True),
),
supports_check_mode=True
)
@@ -51,32 +47,37 @@ def main():
module.fail_json(msg="aos_version module could not import yum: %s" % IMPORT_EXCEPTION)
# determine the packages we will look for
- rpm_prefix = module.params['rpm_prefix']
- if not rpm_prefix:
- module.fail_json(msg="rpm_prefix must not be empty")
- expected_pkgs = set([
- rpm_prefix,
- rpm_prefix + '-master',
- rpm_prefix + '-node',
- ])
-
- # determine what level of precision the user specified for the openshift version.
- # should look like a version string with possibly many segments e.g. "3.4.1":
- requested_openshift_release = module.params['requested_openshift_release']
+ package_list = module.params['package_list']
+ if not package_list:
+ module.fail_json(msg="package_list must not be empty")
+
+ # generate set with only the names of expected packages
+ expected_pkg_names = [p["name"] for p in package_list]
+
+ # gather packages that require a multi_minor_release check
+ multi_minor_pkgs = [p for p in package_list if p["check_multi"]]
+
+ # generate list of packages with a specified (non-empty) version
+ # should look like a version string with possibly many segments e.g. "3.4.1"
+ versioned_pkgs = [p for p in package_list if p["version"]]
# get the list of packages available and complain if anything is wrong
try:
- pkgs = _retrieve_available_packages(expected_pkgs)
- if requested_openshift_release:
- _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release)
- _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release)
- if module.params['openshift_deployment_type'] in ['openshift-enterprise']:
- _check_multi_minor_release(pkgs, expected_pkgs)
+ pkgs = _retrieve_available_packages(expected_pkg_names)
+ if versioned_pkgs:
+ _check_precise_version_found(pkgs, _to_dict(versioned_pkgs))
+ _check_higher_version_found(pkgs, _to_dict(versioned_pkgs))
+ if multi_minor_pkgs:
+ _check_multi_minor_release(pkgs, _to_dict(multi_minor_pkgs))
except AosVersionException as excinfo:
module.fail_json(msg=str(excinfo))
module.exit_json(changed=False)
+def _to_dict(pkg_list):
+ return {pkg["name"]: pkg for pkg in pkg_list}
+
+
def _retrieve_available_packages(expected_pkgs):
# search for package versions available for openshift pkgs
yb = yum.YumBase() # pylint: disable=invalid-name
@@ -104,56 +105,60 @@ def _retrieve_available_packages(expected_pkgs):
class PreciseVersionNotFound(AosVersionException):
- '''Exception for reporting packages not available at given release'''
- def __init__(self, requested_release, not_found):
- msg = ['Not all of the required packages are available at requested version %s:' % requested_release]
- msg += [' ' + name for name in not_found]
+ """Exception for reporting packages not available at given version"""
+ def __init__(self, not_found):
+ msg = ['Not all of the required packages are available at their requested version']
+ msg += ['{}:{} '.format(pkg["name"], pkg["version"]) for pkg in not_found]
msg += ['Please check your subscriptions and enabled repositories.']
AosVersionException.__init__(self, '\n'.join(msg), not_found)
-def _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release):
+def _check_precise_version_found(pkgs, expected_pkgs_dict):
# see if any packages couldn't be found at requested release version
# we would like to verify that the latest available pkgs have however specific a version is given.
# so e.g. if there is a package version 3.4.1.5 the check passes; if only 3.4.0, it fails.
- pkgs_precise_version_found = {}
+ pkgs_precise_version_found = set()
for pkg in pkgs:
- if pkg.name not in expected_pkgs:
+ if pkg.name not in expected_pkgs_dict:
continue
# does the version match, to the precision requested?
# and, is it strictly greater, at the precision requested?
- match_version = '.'.join(pkg.version.split('.')[:requested_openshift_release.count('.') + 1])
- if match_version == requested_openshift_release:
- pkgs_precise_version_found[pkg.name] = True
+ expected_pkg_version = expected_pkgs_dict[pkg.name]["version"]
+ match_version = '.'.join(pkg.version.split('.')[:expected_pkg_version.count('.') + 1])
+ if match_version == expected_pkg_version:
+ pkgs_precise_version_found.add(pkg.name)
not_found = []
- for name in expected_pkgs:
+ for name, pkg in expected_pkgs_dict.items():
if name not in pkgs_precise_version_found:
- not_found.append(name)
+ not_found.append(pkg)
if not_found:
- raise PreciseVersionNotFound(requested_openshift_release, not_found)
+ raise PreciseVersionNotFound(not_found)
class FoundHigherVersion(AosVersionException):
- '''Exception for reporting that a higher version than requested is available'''
- def __init__(self, requested_release, higher_found):
+ """Exception for reporting that a higher version than requested is available"""
+ def __init__(self, higher_found):
msg = ['Some required package(s) are available at a version',
- 'that is higher than requested %s:' % requested_release]
+ 'that is higher than requested']
msg += [' ' + name for name in higher_found]
msg += ['This will prevent installing the version you requested.']
msg += ['Please check your enabled repositories or adjust openshift_release.']
AosVersionException.__init__(self, '\n'.join(msg), higher_found)
-def _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release):
- req_release_arr = [int(segment) for segment in requested_openshift_release.split(".")]
+def _check_higher_version_found(pkgs, expected_pkgs_dict):
+ expected_pkg_names = list(expected_pkgs_dict)
+
# see if any packages are available in a version higher than requested
higher_version_for_pkg = {}
for pkg in pkgs:
- if pkg.name not in expected_pkgs:
+ if pkg.name not in expected_pkg_names:
continue
+ expected_pkg_version = expected_pkgs_dict[pkg.name]["version"]
+ req_release_arr = [int(segment) for segment in expected_pkg_version.split(".")]
version = [int(segment) for segment in pkg.version.split(".")]
too_high = version[:len(req_release_arr)] > req_release_arr
higher_than_seen = version > higher_version_for_pkg.get(pkg.name, [])
@@ -164,11 +169,11 @@ def _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release
higher_found = []
for name, version in higher_version_for_pkg.items():
higher_found.append(name + '-' + '.'.join(str(segment) for segment in version))
- raise FoundHigherVersion(requested_openshift_release, higher_found)
+ raise FoundHigherVersion(higher_found)
class FoundMultiRelease(AosVersionException):
- '''Exception for reporting multiple minor releases found for same package'''
+ """Exception for reporting multiple minor releases found for same package"""
def __init__(self, multi_found):
msg = ['Multiple minor versions of these packages are available']
msg += [' ' + name for name in multi_found]
@@ -176,18 +181,18 @@ class FoundMultiRelease(AosVersionException):
AosVersionException.__init__(self, '\n'.join(msg), multi_found)
-def _check_multi_minor_release(pkgs, expected_pkgs):
+def _check_multi_minor_release(pkgs, expected_pkgs_dict):
# see if any packages are available in more than one minor version
pkgs_by_name_version = {}
for pkg in pkgs:
# keep track of x.y (minor release) versions seen
minor_release = '.'.join(pkg.version.split('.')[:2])
if pkg.name not in pkgs_by_name_version:
- pkgs_by_name_version[pkg.name] = {}
- pkgs_by_name_version[pkg.name][minor_release] = True
+ pkgs_by_name_version[pkg.name] = set()
+ pkgs_by_name_version[pkg.name].add(minor_release)
multi_found = []
- for name in expected_pkgs:
+ for name in expected_pkgs_dict:
if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1:
multi_found.append(name)
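After this refactor the module takes a single `package_list` parameter instead of an rpm prefix, deployment type, and release. A hedged sketch of the expected entry shape and the `_to_dict` indexing; the names and versions below are illustrative, not taken from the diff:

```python
def _to_dict(pkg_list):
    return {pkg["name"]: pkg for pkg in pkg_list}

# Illustrative input; real callers build this list from inventory variables.
package_list = [
    {"name": "origin", "version": "3.6", "check_multi": False},
    {"name": "origin-master", "version": "3.6", "check_multi": False},
    {"name": "origin-node", "version": "", "check_multi": True},
]

versioned = [p for p in package_list if p["version"]]      # precise/higher checks
multi_minor = [p for p in package_list if p["check_multi"]]  # multi-minor check
by_name = _to_dict(versioned)  # {'origin': {...}, 'origin-master': {...}}
```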
diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py
new file mode 100644
index 000000000..2e60735d6
--- /dev/null
+++ b/roles/openshift_health_checker/library/ocutil.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+"""Interface to OpenShift oc command"""
+
+import os
+import shlex
+import shutil
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ """Find and return oc binary file"""
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+def main():
+ """Module that executes commands on a remote OpenShift cluster"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ namespace=dict(type="str", required=True),
+ config_file=dict(type="str", required=True),
+ cmd=dict(type="str", required=True),
+ extra_args=dict(type="list", default=[]),
+ ),
+ )
+
+ cmd = [
+ locate_oc_binary(),
+ '--config', module.params["config_file"],
+ '-n', module.params["namespace"],
+ ] + shlex.split(module.params["cmd"])
+
+ failed = True
+ try:
+ cmd_result = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT)
+ failed = False
+ except subprocess.CalledProcessError as exc:
+ cmd_result = '[rc {}] {}\n{}'.format(exc.returncode, ' '.join(exc.cmd), exc.output)
+ except OSError as exc:
+ # we get this when 'oc' is not there
+ cmd_result = str(exc)
+
+ module.exit_json(
+ changed=False,
+ failed=failed,
+ result=cmd_result,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_health_checker/library/rpm_version.py b/roles/openshift_health_checker/library/rpm_version.py
new file mode 100644
index 000000000..8ea223055
--- /dev/null
+++ b/roles/openshift_health_checker/library/rpm_version.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+"""
+Ansible module for rpm-based systems determining existing package version information in a host.
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+
+IMPORT_EXCEPTION = None
+try:
+ import rpm # pylint: disable=import-error
+except ImportError as err:
+ IMPORT_EXCEPTION = err # in tox test env, rpm import fails
+
+
+class RpmVersionException(Exception):
+ """Base exception class for package version problems"""
+ def __init__(self, message, problem_pkgs=None):
+ Exception.__init__(self, message)
+ self.problem_pkgs = problem_pkgs
+
+
+def main():
+ """Entrypoint for this Ansible module"""
+ module = AnsibleModule(
+ argument_spec=dict(
+ package_list=dict(type="list", required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ if IMPORT_EXCEPTION:
+ module.fail_json(msg="rpm_version module could not import rpm: %s" % IMPORT_EXCEPTION)
+
+ # determine the packages we will look for
+ pkg_list = module.params['package_list']
+ if not pkg_list:
+ module.fail_json(msg="package_list must not be empty")
+
+ # get list of packages available and complain if any
+ # of them are missing or if any errors occur
+ try:
+ pkg_versions = _retrieve_expected_pkg_versions(_to_dict(pkg_list))
+ _check_pkg_versions(pkg_versions, _to_dict(pkg_list))
+ except RpmVersionException as excinfo:
+ module.fail_json(msg=str(excinfo))
+ module.exit_json(changed=False)
+
+
+def _to_dict(pkg_list):
+ return {pkg["name"]: pkg for pkg in pkg_list}
+
+
+def _retrieve_expected_pkg_versions(expected_pkgs_dict):
+ """Search for installed packages matching given pkg names
+ and versions. Returns a dictionary: {pkg_name: [versions]}"""
+
+ transaction = rpm.TransactionSet()
+ pkgs = {}
+
+ for pkg_name in expected_pkgs_dict:
+ matched_pkgs = transaction.dbMatch("name", pkg_name)
+ if not matched_pkgs:
+ continue
+
+ for header in matched_pkgs:
+ if header['name'] == pkg_name:
+ if pkg_name not in pkgs:
+ pkgs[pkg_name] = []
+
+ pkgs[pkg_name].append(header['version'])
+
+ return pkgs
+
+
+def _check_pkg_versions(found_pkgs_dict, expected_pkgs_dict):
+ invalid_pkg_versions = {}
+ not_found_pkgs = []
+
+ for pkg_name, pkg in expected_pkgs_dict.items():
+ if not found_pkgs_dict.get(pkg_name):
+ not_found_pkgs.append(pkg_name)
+ continue
+
+ found_versions = [_parse_version(version) for version in found_pkgs_dict[pkg_name]]
+ expected_version = _parse_version(pkg["version"])
+ if expected_version not in found_versions:
+ invalid_pkg_versions[pkg_name] = {
+ "found_versions": found_versions,
+ "required_version": expected_version,
+ }
+
+ if not_found_pkgs:
+ raise RpmVersionException(
+ '\n'.join([
+ "The following packages were not found to be installed: {}".format('\n '.join([
+ "{}".format(pkg)
+ for pkg in not_found_pkgs
+ ]))
+ ]),
+ not_found_pkgs,
+ )
+
+ if invalid_pkg_versions:
+ raise RpmVersionException(
+ '\n '.join([
+ "The following packages were found to be installed with an incorrect version: {}".format('\n'.join([
+ " \n{}\n Required version: {}\n Found versions: {}".format(
+ pkg_name,
+ pkg["required_version"],
+ ', '.join([version for version in pkg["found_versions"]]))
+ for pkg_name, pkg in invalid_pkg_versions.items()
+ ]))
+ ]),
+ invalid_pkg_versions,
+ )
+
+
+def _parse_version(version_str):
+ segs = version_str.split('.')
+ if not segs or len(segs) <= 2:
+ return version_str
+
+ return '.'.join(segs[0:2])
+
+
+if __name__ == '__main__':
+ main()
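`_parse_version` deliberately truncates to the minor release before comparison, so an installed 3.4.1.5 satisfies an expected 3.4. A runnable illustration of that truncation, restating the helper so the snippet is self-contained:

```python
def _parse_version(version_str):
    segs = version_str.split('.')
    if not segs or len(segs) <= 2:
        return version_str
    return '.'.join(segs[0:2])

assert _parse_version('3.4') == '3.4'      # short versions pass through unchanged
assert _parse_version('3.4.1.5') == '3.4'  # longer versions truncate to x.y
assert _parse_version('3.4.1.5') == _parse_version('3.4.0')  # both compare as '3.4'
```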
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index be63d864a..5c9949ced 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -66,16 +66,26 @@ class OpenShiftCheck(object):
LOADER_EXCLUDES = (
"__init__.py",
"mixins.py",
+ "logging.py",
)
-def load_checks():
+def load_checks(path=None, subpkg=""):
"""Dynamically import all check modules for the side effect of registering checks."""
- return [
- import_module(__package__ + "." + name[:-3])
- for name in os.listdir(os.path.dirname(__file__))
- if name.endswith(".py") and name not in LOADER_EXCLUDES
- ]
+ if path is None:
+ path = os.path.dirname(__file__)
+
+ modules = []
+
+ for name in os.listdir(path):
+ if os.path.isdir(os.path.join(path, name)):
+ modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
+ continue
+
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
+ modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
+
+ return modules
def get_var(task_vars, *keys, **kwargs):
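With the recursive loader, a subdirectory such as the new `logging/` package is walked and its modules are imported with the subpackage inserted into the dotted path. A sketch of the name construction only, performing no imports (the directory layout is assumed to match this commit):

```python
import os

LOADER_EXCLUDES = ("__init__.py", "mixins.py", "logging.py")

def module_names(path, package="openshift_checks", subpkg=""):
    """Mirror load_checks' traversal, yielding dotted module names instead
    of importing them."""
    names = []
    for name in sorted(os.listdir(path)):
        full = os.path.join(path, name)
        if os.path.isdir(full):
            names += module_names(full, package, subpkg + "." + name)
        elif name.endswith(".py") and name not in LOADER_EXCLUDES:
            names.append(package + subpkg + "." + name[:-3])
    return names

# For a tree containing curator.py under logging/, this yields
# 'openshift_checks.logging.curator'.
```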
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 4588ed634..60aacf715 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,8 +1,25 @@
-# pylint: disable=missing-docstring
+"""Check that required Docker images are available."""
+
from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks.mixins import DockerHostMixin
+
+NODE_IMAGE_SUFFIXES = ["haproxy-router", "docker-registry", "deployer", "pod"]
+DEPLOYMENT_IMAGE_INFO = {
+ "origin": {
+ "namespace": "openshift",
+ "name": "origin",
+ "registry_console_image": "cockpit/kubernetes",
+ },
+ "openshift-enterprise": {
+ "namespace": "openshift3",
+ "name": "ose",
+ "registry_console_image": "registry.access.redhat.com/openshift3/registry-console",
+ },
+}
-class DockerImageAvailability(OpenShiftCheck):
+
+class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Check that required Docker images are available.
This check attempts to ensure that required docker images are
@@ -12,43 +29,23 @@ class DockerImageAvailability(OpenShiftCheck):
name = "docker_image_availability"
tags = ["preflight"]
-
dependencies = ["skopeo", "python-docker-py"]
- deployment_image_info = {
- "origin": {
- "namespace": "openshift",
- "name": "origin",
- },
- "openshift-enterprise": {
- "namespace": "openshift3",
- "name": "ose",
- },
- }
-
@classmethod
def is_active(cls, task_vars):
"""Skip hosts with unsupported deployment types."""
deployment_type = get_var(task_vars, "openshift_deployment_type")
- has_valid_deployment_type = deployment_type in cls.deployment_image_info
+ has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO
return super(DockerImageAvailability, cls).is_active(task_vars) and has_valid_deployment_type
def run(self, tmp, task_vars):
msg, failed, changed = self.ensure_dependencies(task_vars)
-
- # exit early if Skopeo update fails
if failed:
- if "No package matching" in msg:
- msg = "Ensure that all required dependencies can be installed via `yum`.\n"
return {
"failed": True,
"changed": changed,
- "msg": (
- "Unable to update or install required dependency packages on this host;\n"
- "These are required in order to check Docker image availability:"
- "\n {deps}\n{msg}"
- ).format(deps=',\n '.join(self.dependencies), msg=msg),
+ "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
}
required_images = self.required_images(task_vars)
@@ -77,51 +74,55 @@ class DockerImageAvailability(OpenShiftCheck):
return {"changed": changed}
- def required_images(self, task_vars):
- deployment_type = get_var(task_vars, "openshift_deployment_type")
- image_info = self.deployment_image_info[deployment_type]
-
- openshift_release = get_var(task_vars, "openshift_release", default="latest")
- openshift_image_tag = get_var(task_vars, "openshift_image_tag")
- is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
-
- images = set(self.required_docker_images(
- image_info["namespace"],
- image_info["name"],
- ["registry-console"] if "enterprise" in deployment_type else [], # include enterprise-only image names
- openshift_release,
- is_containerized,
- ))
-
- # append images with qualified image tags to our list of required images.
- # these are images with a (v0.0.0.0) tag, rather than a standard release
- # format tag (v0.0). We want to check this set in both containerized and
- # non-containerized installations.
- images.update(
- self.required_qualified_docker_images(
- image_info["namespace"],
- image_info["name"],
- openshift_image_tag,
- ),
- )
-
- return images
-
- @staticmethod
- def required_docker_images(namespace, name, additional_image_names, version, is_containerized):
- if is_containerized:
- return ["{}/{}:{}".format(namespace, name, version)] if name else []
-
- # include additional non-containerized images specific to the current deployment type
- return ["{}/{}:{}".format(namespace, img_name, version) for img_name in additional_image_names]
-
@staticmethod
- def required_qualified_docker_images(namespace, name, version):
- # pylint: disable=invalid-name
- return [
- "{}/{}-{}:{}".format(namespace, name, suffix, version)
- for suffix in ["haproxy-router", "docker-registry", "deployer", "pod"]
- ]
+ def required_images(task_vars):
+ """
+ Determine which images we expect to need for this host.
+ Returns: a set of required images like 'openshift/origin:v3.6'
+
+ The thorny issue of determining the image names from the variables is under consideration
+ via https://github.com/openshift/openshift-ansible/issues/4415
+
+ For now we operate as follows:
+ * For containerized components (master, node, ...) we look at the deployment type and
+ use openshift/origin or openshift3/ose as the base for those component images. The
+ version is openshift_image_tag as determined by the openshift_version role.
+ * For OpenShift-managed infrastructure (router, registry...) we use oreg_url if
+ it is defined; otherwise we again use the base that depends on the deployment type.
+ The registry hostname is not included in constructed image names; it may appear in oreg_url or the etcd image.
+ """
+ required = set()
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ host_groups = get_var(task_vars, "group_names")
+ image_tag = get_var(task_vars, "openshift_image_tag")
+ image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
+ if not image_info:
+ return required
+
+ # template for images that run on top of OpenShift
+ image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
+ image_url = get_var(task_vars, "oreg_url", default="") or image_url
+ if 'nodes' in host_groups:
+ for suffix in NODE_IMAGE_SUFFIXES:
+ required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
+ # The registry-console is for some reason not prefixed with ose- like the other components.
+ # Nor is it versioned the same, so just look for latest.
+ # Also a completely different name is used for Origin.
+ required.add(image_info["registry_console_image"])
+
+ # images for containerized components
+ if get_var(task_vars, "openshift", "common", "is_containerized"):
+ components = set()
+ if 'nodes' in host_groups:
+ components.update(["node", "openvswitch"])
+ if 'masters' in host_groups: # name is "origin" or "ose"
+ components.add(image_info["name"])
+ for component in components:
+ required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag))
+ if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise
+ required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag
+
+ return required
def local_images(self, images, task_vars):
"""Filter a list of images and return those available locally."""
@@ -131,7 +132,8 @@ class DockerImageAvailability(OpenShiftCheck):
]
def is_image_local(self, image, task_vars):
- result = self.module_executor("docker_image_facts", {"name": image}, task_vars)
+ """Check if image is already in local docker index."""
+ result = self.execute_module("docker_image_facts", {"name": image}, task_vars=task_vars)
if result.get("failed", False):
return False
@@ -139,6 +141,7 @@ class DockerImageAvailability(OpenShiftCheck):
@staticmethod
def known_docker_registries(task_vars):
+ """Build a list of docker registries available according to inventory vars."""
docker_facts = get_var(task_vars, "openshift", "docker")
regs = set(docker_facts["additional_registries"])
@@ -154,26 +157,21 @@ class DockerImageAvailability(OpenShiftCheck):
"""Inspect existing images using Skopeo and return all images successfully inspected."""
return [
image for image in images
- if any(self.is_available_skopeo_image(image, registry, task_vars) for registry in registries)
+ if self.is_available_skopeo_image(image, registries, task_vars)
]
- def is_available_skopeo_image(self, image, registry, task_vars):
- """Uses Skopeo to determine if required image exists in a given registry."""
-
- cmd_str = "skopeo inspect docker://{registry}/{image}".format(
- registry=registry,
- image=image,
- )
+ def is_available_skopeo_image(self, image, registries, task_vars):
+ """Use Skopeo to determine if required image exists in known registry(s)."""
- args = {"_raw_params": cmd_str}
- result = self.module_executor("command", args, task_vars)
- return not result.get("failed", False) and result.get("rc", 0) == 0
+ # if the image already includes a registry, just use that
+ if image.count("/") > 1:
+ registry, image = image.split("/", 1)
+ registries = [registry]
- # ensures that the skopeo and python-docker-py packages exist
- # check is skipped on atomic installations
- def ensure_dependencies(self, task_vars):
- if get_var(task_vars, "openshift", "common", "is_atomic"):
- return "", False, False
+ for registry in registries:
+ args = {"_raw_params": "skopeo inspect docker://{}/{}".format(registry, image)}
+ result = self.execute_module("command", args, task_vars=task_vars)
+ if result.get("rc", 0) == 0 and not result.get("failed"):
+ return True
- result = self.module_executor("yum", {"name": self.dependencies, "state": "latest"}, task_vars)
- return result.get("msg", ""), result.get("failed", False) or result.get("rc", 0) != 0, result.get("changed")
+ return False
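The `${component}`/`${version}` placeholders let `oreg_url` act as a template for the OpenShift-managed infrastructure images. A sketch of that substitution; the registry host and tag below are hypothetical, not taken from the diff:

```python
NODE_IMAGE_SUFFIXES = ["haproxy-router", "docker-registry", "deployer", "pod"]

def infra_images(image_tag, oreg_url=None, namespace="openshift3", name="ose"):
    """Build the infrastructure image names the check expects on node hosts."""
    template = oreg_url or "{}/{}-{}:{}".format(
        namespace, name, "${component}", "${version}")
    return {
        template.replace("${component}", suffix).replace("${version}", image_tag)
        for suffix in NODE_IMAGE_SUFFIXES
    }

# Default template:
#   infra_images("v3.6.0") contains 'openshift3/ose-haproxy-router:v3.6.0'
# With a hypothetical oreg_url:
#   infra_images("v3.6.0", "reg.example.com/openshift3/ose-${component}:${version}")
#   contains 'reg.example.com/openshift3/ose-pod:v3.6.0'
```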
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
new file mode 100644
index 000000000..2bd615457
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -0,0 +1,185 @@
+"""Check Docker storage driver and usage."""
+import json
+import re
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import DockerHostMixin
+
+
+class DockerStorage(DockerHostMixin, OpenShiftCheck):
+ """Check Docker storage driver compatibility.
+
+ This check ensures that Docker is using a supported storage driver,
+ and that loopback is not being used (if using devicemapper).
+ Also that storage usage is not above threshold.
+ """
+
+ name = "docker_storage"
+ tags = ["pre-install", "health", "preflight"]
+
+ dependencies = ["python-docker-py"]
+ storage_drivers = ["devicemapper", "overlay2"]
+ max_thinpool_data_usage_percent = 90.0
+ max_thinpool_meta_usage_percent = 90.0
+
+ # pylint: disable=too-many-return-statements
+ # Reason: permanent stylistic exception;
+ # it is clearer to return on failures and there are just many ways to fail here.
+ def run(self, tmp, task_vars):
+ msg, failed, changed = self.ensure_dependencies(task_vars)
+ if failed:
+ return {
+ "failed": True,
+ "changed": changed,
+ "msg": "Some dependencies are required in order to query docker storage on host:\n" + msg
+ }
+
+ # attempt to get the docker info hash from the API
+ info = self.execute_module("docker_info", {}, task_vars=task_vars)
+ if info.get("failed"):
+ return {"failed": True, "changed": changed,
+ "msg": "Failed to query Docker API. Is docker running on this host?"}
+ if not info.get("info"): # this would be very strange
+ return {"failed": True, "changed": changed,
+ "msg": "Docker API query missing info:\n{}".format(json.dumps(info))}
+ info = info["info"]
+
+ # check if the storage driver we saw is valid
+ driver = info.get("Driver", "[NONE]")
+ if driver not in self.storage_drivers:
+ msg = (
+ "Detected unsupported Docker storage driver '{driver}'.\n"
+ "Supported storage drivers are: {drivers}"
+ ).format(driver=driver, drivers=', '.join(self.storage_drivers))
+ return {"failed": True, "changed": changed, "msg": msg}
+
+ # driver status info is a list of tuples; convert to dict and validate based on driver
+ driver_status = {item[0]: item[1] for item in info.get("DriverStatus", [])}
+ if driver == "devicemapper":
+ if driver_status.get("Data loop file"):
+ msg = (
+ "Use of loopback devices with the Docker devicemapper storage driver\n"
+ "(the default storage configuration) is unsupported in production.\n"
+ "Please use docker-storage-setup to configure a backing storage volume.\n"
+ "See http://red.ht/2rNperO for further information."
+ )
+ return {"failed": True, "changed": changed, "msg": msg}
+ result = self._check_dm_usage(driver_status, task_vars)
+ result['changed'] = result.get('changed', False) or changed
+ return result
+
+ # TODO(lmeyer): determine how to check usage for overlay2
+
+ return {"changed": changed}
+
+ def _check_dm_usage(self, driver_status, task_vars):
+ """
+ Backing assumptions: We expect devicemapper to be backed by an auto-expanding thin pool
+ implemented as an LV in an LVM2 VG. This is how docker-storage-setup currently configures
+ devicemapper storage. The LV is "thin" because it does not use all available storage
+ from its VG, instead expanding as needed; so to determine available space, we gather
+ current usage as the Docker API reports for the driver as well as space available for
+ expansion in the pool's VG.
+ Usage within the LV is divided into pools allocated to data and metadata, either of which
+ could run out of space first; so we check both.
+ """
+ vals = dict(
+ vg_free=self._get_vg_free(driver_status.get("Pool Name"), task_vars),
+ data_used=driver_status.get("Data Space Used"),
+ data_total=driver_status.get("Data Space Total"),
+ metadata_used=driver_status.get("Metadata Space Used"),
+ metadata_total=driver_status.get("Metadata Space Total"),
+ )
+
+ # convert all human-readable strings to bytes
+ for key, value in vals.copy().items():
+ try:
+ vals[key + "_bytes"] = self._convert_to_bytes(value)
+ except ValueError as err: # unlikely to hit this from API info, but just to be safe
+ return {
+ "failed": True,
+ "values": vals,
+ "msg": "Could not interpret {} value '{}' as bytes: {}".format(key, value, str(err))
+ }
+
+ # determine the threshold percentages which usage should not exceed
+ for name, default in [("data", self.max_thinpool_data_usage_percent),
+ ("metadata", self.max_thinpool_meta_usage_percent)]:
+ percent = get_var(task_vars, "max_thinpool_" + name + "_usage_percent", default=default)
+ try:
+ vals[name + "_threshold"] = float(percent)
+ except ValueError:
+ return {
+ "failed": True,
+ "msg": "Specified thinpool {} usage limit '{}' is not a percentage".format(name, percent)
+ }
+
+ # test whether the thresholds are exceeded
+ messages = []
+ for name in ["data", "metadata"]:
+ vals[name + "_pct_used"] = 100 * vals[name + "_used_bytes"] / (
+ vals[name + "_total_bytes"] + vals["vg_free_bytes"])
+ if vals[name + "_pct_used"] > vals[name + "_threshold"]:
+ messages.append(
+ "Docker thinpool {name} usage percentage {pct:.1f} "
+ "is higher than threshold {thresh:.1f}.".format(
+ name=name,
+ pct=vals[name + "_pct_used"],
+ thresh=vals[name + "_threshold"],
+ ))
+ vals["failed"] = True
+
+ vals["msg"] = "\n".join(messages or ["Thinpool usage is within thresholds."])
+ return vals
+
+ def _get_vg_free(self, pool, task_vars):
+ # Determine which VG to examine according to the pool name, the only indicator currently
+ # available from the Docker API driver info. We assume a name that looks like
+ # "vg--name-docker--pool"; vg and lv names with inner hyphens doubled, joined by a hyphen.
+ match = re.match(r'((?:[^-]|--)+)-(?!-)', pool) # matches up to the first single hyphen
+ if not match: # unlikely, but... be clear if we assumed wrong
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{}'.\n"
+ "However this name does not have the expected format of 'vgname-lvname'\n"
+ "so the available storage in the VG cannot be determined.".format(pool)
+ )
+ vg_name = match.groups()[0].replace("--", "-")
+ vgs_cmd = "/sbin/vgs --noheadings -o vg_free --select vg_name=" + vg_name
+ # should return free space like " 12.00g" if the VG exists; empty if it does not
+
+ ret = self.execute_module("command", {"_raw_params": vgs_cmd}, task_vars=task_vars)
+ if ret.get("failed") or ret.get("rc", 0) != 0:
+ raise OpenShiftCheckException(
+ "Is LVM installed? Failed to run /sbin/vgs "
+ "to determine docker storage usage:\n" + ret.get("msg", "")
+ )
+ size = ret.get("stdout", "").strip()
+ if not size:
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{pool}'.\n"
+ "which we expect to come from local VG '{vg}'.\n"
+ "However, /sbin/vgs did not find this VG. Is Docker for this host"
+ "running and using the storage on the host?".format(pool=pool, vg=vg_name)
+ )
+ return size
+
+ @staticmethod
+ def _convert_to_bytes(string):
+ units = dict(
+ b=1,
+ k=1024,
+ m=1024**2,
+ g=1024**3,
+ t=1024**4,
+ p=1024**5,
+ )
+ string = string or ""
+ match = re.match(r'(\d+(?:\.\d+)?)\s*(\w)?', string) # float followed by optional unit
+ if not match:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ number, unit = match.groups()
+ multiplier = 1 if not unit else units.get(unit.lower())
+ if not multiplier:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ return float(number) * multiplier
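Two helpers above are easy to sanity-check in isolation: the pool-name regex undoes LVM's hyphen doubling to recover the VG name, and the byte conversion parses the human-readable sizes reported by the Docker API. A runnable illustration, with an example pool name and sizes:

```python
import re

# Pool-name regex: match up to the first single hyphen, then undo '--' escaping.
match = re.match(r'((?:[^-]|--)+)-(?!-)', 'docker--vg-docker--pool')
assert match.groups()[0].replace('--', '-') == 'docker-vg'

def convert_to_bytes(string):
    """Parse sizes like '12.00g' or '104.9 MB' into a byte count."""
    units = dict(b=1, k=1024, m=1024**2, g=1024**3, t=1024**4, p=1024**5)
    m = re.match(r'(\d+(?:\.\d+)?)\s*(\w)?', string or "")
    if not m:
        raise ValueError("Cannot convert to a byte size: " + string)
    number, unit = m.groups()
    multiplier = 1 if not unit else units.get(unit.lower())
    if not multiplier:
        raise ValueError("Cannot convert to a byte size: " + string)
    return float(number) * multiplier

assert convert_to_bytes('12.00g') == 12 * 1024**3
assert convert_to_bytes('104.9 MB') == 104.9 * 1024**2
```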
diff --git a/roles/openshift_health_checker/openshift_checks/logging/__init__.py b/roles/openshift_health_checker/openshift_checks/logging/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/__init__.py
diff --git a/roles/openshift_health_checker/openshift_checks/logging/curator.py b/roles/openshift_health_checker/openshift_checks/logging/curator.py
new file mode 100644
index 000000000..c9fc59896
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/curator.py
@@ -0,0 +1,61 @@
+"""
+Module for performing checks on a Curator logging deployment
+"""
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Curator(LoggingCheck):
+ """Module that checks an integrated logging Curator deployment"""
+
+ name = "curator"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ curator_pods, error = super(Curator, self).get_pods_for_component(
+ self.module_executor,
+ self.logging_namespace,
+ "curator",
+ task_vars
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_curator(curator_pods)
+
+ if check_error:
+ msg = ("The following Curator deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Curator deployment.'}
+
+ def check_curator(self, pods):
+ """Check to see if curator is up and working. Returns: error string"""
+ if not pods:
+ return (
+ "There are no Curator pods for the logging stack,\n"
+ "so nothing will prune Elasticsearch indexes.\n"
+ "Is Curator correctly deployed?"
+ )
+
+ not_running = super(Curator, self).not_running_pods(pods)
+ if len(not_running) == len(pods):
+ return (
+ "The Curator pod is not currently in a running state,\n"
+ "so Elasticsearch indexes may increase without bound."
+ )
+ if len(pods) - len(not_running) > 1:
+ return (
+ "There is more than one Curator pod running. This should not normally happen.\n"
+ "Although this doesn't cause any problems, you may want to investigate."
+ )
+
+ return None
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
new file mode 100644
index 000000000..01cb35b81
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -0,0 +1,217 @@
+"""
+Module for performing checks on an Elasticsearch logging deployment
+"""
+
+import json
+import re
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Elasticsearch(LoggingCheck):
+ """Module that checks an integrated logging Elasticsearch deployment"""
+
+ name = "elasticsearch"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ es_pods, error = super(Elasticsearch, self).get_pods_for_component(
+ self.execute_module,
+ self.logging_namespace,
+ "es",
+ task_vars,
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_elasticsearch(es_pods, task_vars)
+
+ if check_error:
+ msg = ("The following Elasticsearch deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Elasticsearch deployment.'}
+
+ def _not_running_elasticsearch_pods(self, es_pods):
+ """Returns: list of running pods, list of errors about non-running pods"""
+ not_running = super(Elasticsearch, self).not_running_pods(es_pods)
+ if not_running:
+ return not_running, [(
+ 'The following Elasticsearch pods are not running:\n'
+ '{pods}'
+ 'These pods will not aggregate logs from their nodes.'
+ ).format(pods=''.join(
+ " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
+ for pod in not_running
+ ))]
+ return not_running, []
+
+ def check_elasticsearch(self, es_pods, task_vars):
+ """Various checks for elasticsearch. Returns: error string"""
+ not_running_pods, error_msgs = self._not_running_elasticsearch_pods(es_pods)
+ running_pods = [pod for pod in es_pods if pod not in not_running_pods]
+ pods_by_name = {
+ pod['metadata']['name']: pod for pod in running_pods
+ # Filter out pods that are not members of a DC
+ if pod['metadata'].get('labels', {}).get('deploymentconfig')
+ }
+ if not pods_by_name:
+ return 'No logging Elasticsearch pods were found. Is logging deployed?'
+ error_msgs += self._check_elasticsearch_masters(pods_by_name, task_vars)
+ error_msgs += self._check_elasticsearch_node_list(pods_by_name, task_vars)
+ error_msgs += self._check_es_cluster_health(pods_by_name, task_vars)
+ error_msgs += self._check_elasticsearch_diskspace(pods_by_name, task_vars)
+ return '\n'.join(error_msgs)
+
+ @staticmethod
+ def _build_es_curl_cmd(pod_name, url):
+ base = "exec {name} -- curl -s --cert {base}cert --key {base}key --cacert {base}ca -XGET '{url}'"
+ return base.format(base="/etc/elasticsearch/secret/admin-", name=pod_name, url=url)
+
+ def _check_elasticsearch_masters(self, pods_by_name, task_vars):
+ """Check that Elasticsearch masters are sane. Returns: list of error strings"""
+ es_master_names = set()
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ # Compare what each ES node reports as master and compare for split brain
+ get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
+ master_name_str = self._exec_oc(get_master_cmd, [], task_vars)
+ master_names = (master_name_str or '').split(' ')
+ if len(master_names) > 1:
+ es_master_names.add(master_names[1])
+ else:
+ error_msgs.append(
+ 'No master? Elasticsearch {pod} returned an unexpected response when asked for its master name:\n'
+ ' {response}'.format(pod=pod_name, response=master_name_str)
+ )
+
+ if not es_master_names:
+ error_msgs.append('No logging Elasticsearch masters were found. Is logging deployed?')
+ return error_msgs
+
+ if len(es_master_names) > 1:
+ error_msgs.append(
+ 'Found multiple Elasticsearch masters according to the pods:\n'
+ '{master_list}\n'
+ 'This implies that the masters have "split brain" and are not correctly\n'
+ 'replicating data for the logging cluster. Log loss is likely to occur.'
+ .format(master_list='\n'.join(' ' + master for master in es_master_names))
+ )
+
+ return error_msgs
+
+ def _check_elasticsearch_node_list(self, pods_by_name, task_vars):
+ """Check that reported ES masters are accounted for by pods. Returns: list of error strings"""
+
+ if not pods_by_name:
+ return ['No logging Elasticsearch masters were found. Is logging deployed?']
+
+ # get ES cluster nodes
+ node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
+ cluster_node_data = self._exec_oc(node_cmd, [], task_vars)
+ try:
+ cluster_nodes = json.loads(cluster_node_data)['nodes']
+ except (ValueError, KeyError):
+ return [
+ 'Failed to query Elasticsearch for the list of ES nodes. The output was:\n' +
+ cluster_node_data
+ ]
+
+ # Try to match all ES-reported node hosts to known pods.
+ error_msgs = []
+ for node in cluster_nodes.values():
+ # Note that with 1.4/3.4 the pod IP may be used as the master name
+ if not any(node['host'] in (pod_name, pod['status'].get('podIP'))
+ for pod_name, pod in pods_by_name.items()):
+ error_msgs.append(
+ 'The Elasticsearch cluster reports a member node "{node}"\n'
+ 'that does not correspond to any known ES pod.'.format(node=node['host'])
+ )
+
+ return error_msgs
+
+ def _check_es_cluster_health(self, pods_by_name, task_vars):
+ """Exec into the elasticsearch pods and check the cluster health. Returns: list of errors"""
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
+ cluster_health_data = self._exec_oc(cluster_health_cmd, [], task_vars)
+ try:
+ health_res = json.loads(cluster_health_data)
+ if not health_res or not health_res.get('status'):
+ raise ValueError()
+ except ValueError:
+ error_msgs.append(
+ 'Could not retrieve cluster health status from logging ES pod "{pod}".\n'
+ 'Response was:\n{output}'.format(pod=pod_name, output=cluster_health_data)
+ )
+ continue
+
+ if health_res['status'] not in ['green', 'yellow']:
+ error_msgs.append(
+ 'Elasticsearch cluster health status is RED according to pod "{}"'.format(pod_name)
+ )
+
+ return error_msgs
+
+ def _check_elasticsearch_diskspace(self, pods_by_name, task_vars):
+ """
+ Exec into an ES pod and query the diskspace on the persistent volume.
+ Returns: list of errors
+ """
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
+ disk_output = self._exec_oc(df_cmd, [], task_vars)
+ lines = disk_output.splitlines()
+ # expecting one header looking like 'IUse% Use%' and one body line
+ body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
+ if len(lines) != 2 or len(lines[0].split()) != 2 or not re.match(body_re, lines[1]):
+ error_msgs.append(
+ 'Could not retrieve storage usage from logging ES pod "{pod}".\n'
+ 'Response to `df` command was:\n{output}'.format(pod=pod_name, output=disk_output)
+ )
+ continue
+ inode_pct, disk_pct = re.match(body_re, lines[1]).groups()
+
+ inode_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_inode_pct', default='90')
+ if int(inode_pct) >= int(inode_pct_thresh):
+ error_msgs.append(
+ 'Inode percent usage on the storage volume for logging ES pod "{pod}"\n'
+ ' is {pct}, greater than threshold {limit}.\n'
+ ' Note: threshold can be specified in inventory with {param}'.format(
+ pod=pod_name,
+ pct=str(inode_pct),
+ limit=str(inode_pct_thresh),
+ param='openshift_check_efk_es_inode_pct',
+ ))
+ disk_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_storage_pct', default='80')
+ if int(disk_pct) >= int(disk_pct_thresh):
+ error_msgs.append(
+ 'Disk percent usage on the storage volume for logging ES pod "{pod}"\n'
+ ' is {pct}, greater than threshold {limit}.\n'
+ ' Note: threshold can be specified in inventory with {param}'.format(
+ pod=pod_name,
+ pct=str(disk_pct),
+ limit=str(disk_pct_thresh),
+ param='openshift_check_efk_es_storage_pct',
+ ))
+
+ return error_msgs
+
+ def _exec_oc(self, cmd_str, extra_args, task_vars):
+ return super(Elasticsearch, self).exec_oc(
+ self.execute_module,
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ task_vars,
+ )
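To illustrate the disk-space parsing above, here is a minimal standalone sketch (not part of the patch); the sample `df` output values are hypothetical, mirroring those used in the unit tests:

import re

# Hypothetical `df --output=ipcent,pcent /elasticsearch/persistent` output:
# one header line, one body line, exactly as the check expects.
disk_output = "IUse% Use%\n  3%  4%\n"
lines = disk_output.splitlines()
body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
inode_pct, disk_pct = re.match(body_re, lines[1]).groups()
assert (inode_pct, disk_pct) == ("3", "4")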
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
new file mode 100644
index 000000000..627567293
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
@@ -0,0 +1,170 @@
+"""
+Module for performing checks on a Fluentd logging deployment
+"""
+
+import json
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Fluentd(LoggingCheck):
+ """Module that checks an integrated logging Fluentd deployment"""
+ name = "fluentd"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ fluentd_pods, error = super(Fluentd, self).get_pods_for_component(
+ self.execute_module,
+ self.logging_namespace,
+ "fluentd",
+ task_vars,
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_fluentd(fluentd_pods, task_vars)
+
+ if check_error:
+ msg = ("The following Fluentd deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Fluentd deployment.'}
+
+ @staticmethod
+ def _filter_fluentd_labeled_nodes(nodes_by_name, node_selector):
+ """Filter to all nodes with fluentd label. Returns dict(name: node), error string"""
+ label, value = node_selector.split('=', 1)
+ fluentd_nodes = {
+ name: node for name, node in nodes_by_name.items()
+ if node['metadata']['labels'].get(label) == value
+ }
+ if not fluentd_nodes:
+ return None, (
+ 'There are no nodes with the fluentd label {label}.\n'
+ 'This means no logs will be aggregated from the nodes.'
+ ).format(label=node_selector)
+ return fluentd_nodes, None
+
+ @staticmethod
+ def _check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars):
+ """Note if nodes are not labeled as expected. Returns: error string"""
+ intended_nodes = get_var(task_vars, 'openshift_logging_fluentd_hosts', default=['--all'])
+ if not intended_nodes or '--all' in intended_nodes:
+ intended_nodes = nodes_by_name.keys()
+ nodes_missing_labels = set(intended_nodes) - set(fluentd_nodes.keys())
+ if nodes_missing_labels:
+ return (
+ 'The following nodes are supposed to be labeled with {label} but are not:\n'
+ ' {nodes}\n'
+ 'Fluentd will not aggregate logs from these nodes.'
+ ).format(label=node_selector, nodes=', '.join(nodes_missing_labels))
+ return None
+
+ @staticmethod
+ def _check_nodes_have_fluentd(pods, fluentd_nodes):
+ """Make sure fluentd is on all the labeled nodes. Returns: error string"""
+ unmatched_nodes = fluentd_nodes.copy()
+ node_names_by_label = {
+ node['metadata']['labels']['kubernetes.io/hostname']: name
+ for name, node in fluentd_nodes.items()
+ }
+ node_names_by_internal_ip = {
+ address['address']: name
+ for name, node in fluentd_nodes.items()
+ for address in node['status']['addresses']
+ if address['type'] == "InternalIP"
+ }
+ for pod in pods:
+ for name in [
+ pod['spec']['nodeName'],
+ node_names_by_internal_ip.get(pod['spec']['nodeName']),
+ node_names_by_label.get(pod.get('spec', {}).get('host')),
+ ]:
+ unmatched_nodes.pop(name, None)
+ if unmatched_nodes:
+ return (
+ 'The following nodes are supposed to have a Fluentd pod but do not:\n'
+ '{nodes}'
+ 'These nodes will not have their logs aggregated.'
+ ).format(nodes=''.join(
+ " {}\n".format(name)
+ for name in unmatched_nodes.keys()
+ ))
+ return None
+
+ def _check_fluentd_pods_running(self, pods):
+ """Make sure all fluentd pods are running. Returns: error string"""
+ not_running = super(Fluentd, self).not_running_pods(pods)
+ if not_running:
+ return (
+ 'The following Fluentd pods are supposed to be running but are not:\n'
+ '{pods}'
+ 'These pods will not aggregate logs from their nodes.'
+ ).format(pods=''.join(
+ " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
+ for pod in not_running
+ ))
+ return None
+
+ def check_fluentd(self, pods, task_vars):
+ """Verify fluentd is running everywhere. Returns: error string"""
+
+ node_selector = get_var(task_vars, 'openshift_logging_fluentd_nodeselector',
+ default='logging-infra-fluentd=true')
+
+ nodes_by_name, error = self.get_nodes_by_name(task_vars)
+
+ if error:
+ return error
+ fluentd_nodes, error = self._filter_fluentd_labeled_nodes(nodes_by_name, node_selector)
+ if error:
+ return error
+
+ error_msgs = []
+ error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars)
+ if error:
+ error_msgs.append(error)
+ error = self._check_nodes_have_fluentd(pods, fluentd_nodes)
+ if error:
+ error_msgs.append(error)
+ error = self._check_fluentd_pods_running(pods)
+ if error:
+ error_msgs.append(error)
+
+ # Make sure there are no extra fluentd pods
+ if len(pods) > len(fluentd_nodes):
+ error_msgs.append(
+ 'There are more Fluentd pods running than nodes labeled.\n'
+ 'This may not cause problems with logging but it likely indicates something wrong.'
+ )
+
+ return '\n'.join(error_msgs)
+
+ def get_nodes_by_name(self, task_vars):
+ """Retrieve all the node definitions. Returns: dict(name: node), error string"""
+ nodes_json = self._exec_oc("get nodes -o json", [], task_vars)
+ try:
+ nodes = json.loads(nodes_json)
+ except ValueError: # no valid json - should not happen
+ return None, "Could not obtain a list of nodes to validate fluentd. Output from oc get:\n" + nodes_json
+ if not nodes or not nodes.get('items'): # also should not happen
+ return None, "No nodes appear to be defined according to the API."
+ return {
+ node['metadata']['name']: node
+ for node in nodes['items']
+ }, None
+
+ def _exec_oc(self, cmd_str, extra_args, task_vars):
+ return super(Fluentd, self).exec_oc(self.execute_module,
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ task_vars)
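As a quick illustration of the label filtering above, a minimal sketch with hypothetical node data (assuming the role's openshift_checks package is importable; `_filter_fluentd_labeled_nodes` is a staticmethod, so no module execution is involved):

from openshift_checks.logging.fluentd import Fluentd

# Hypothetical nodes: only node1 carries the fluentd label.
nodes_by_name = {
    "node1": {"metadata": {"labels": {"logging-infra-fluentd": "true"}}},
    "node2": {"metadata": {"labels": {}}},
}
fluentd_nodes, error = Fluentd._filter_fluentd_labeled_nodes(
    nodes_by_name, "logging-infra-fluentd=true")
assert list(fluentd_nodes) == ["node1"] and error is None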
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
new file mode 100644
index 000000000..442f407b1
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -0,0 +1,229 @@
+"""
+Module for performing checks on a Kibana logging deployment
+"""
+
+import json
+import ssl
+
+try:
+ from urllib2 import HTTPError, URLError
+ import urllib2
+except ImportError:
+ from urllib.error import HTTPError, URLError
+ import urllib.request as urllib2
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Kibana(LoggingCheck):
+ """Module that checks an integrated logging Kibana deployment"""
+
+ name = "kibana"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ kibana_pods, error = super(Kibana, self).get_pods_for_component(
+ self.execute_module,
+ self.logging_namespace,
+ "kibana",
+ task_vars,
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_kibana(kibana_pods)
+
+ if not check_error:
+ check_error = self._check_kibana_route(task_vars)
+
+ if check_error:
+ msg = ("The following Kibana deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Kibana deployment.'}
+
+ def _verify_url_internal(self, url, task_vars):
+ """
+ Try to reach a URL from the target master host.
+ Returns: None on success, or an error message string on failure
+ """
+ args = dict(
+ url=url,
+ follow_redirects='none',
+ validate_certs='no', # likely to be signed with internal CA
+ # TODO(lmeyer): give users option to validate certs
+ status_code=302,
+ )
+ result = self.execute_module('uri', args, task_vars)
+ if result.get('failed'):
+ return result['msg']
+ return None
+
+ @staticmethod
+ def _verify_url_external(url):
+ """
+ Try to reach a URL from the Ansible control host.
+ Returns: None on success, or an error message string on failure
+ """
+ # This actually checks from the ansible control host, which may or may not
+ # really be "external" to the cluster.
+
+ # Disable SSL cert validation to work around internally signed certs
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False # or setting CERT_NONE is refused
+ ctx.verify_mode = ssl.CERT_NONE
+
+ # Verify that the url is returning a valid response
+ try:
+ # We only care if the url connects and responds
+ return_code = urllib2.urlopen(url, context=ctx).getcode()
+ except HTTPError as httperr:
+ return httperr.reason
+ except URLError as urlerr:
+ return str(urlerr)
+
+ # there appears to be no way to prevent urlopen from following redirects
+ if return_code != 200:
+ return 'Expected success (200) but got return code {}'.format(return_code)
+
+ return None
+
+ def check_kibana(self, pods):
+ """Check to see if Kibana is up and working. Returns: error string."""
+
+ if not pods:
+ return "There are no Kibana pods deployed, so no access to the logging UI."
+
+ not_running = self.not_running_pods(pods)
+ if len(not_running) == len(pods):
+ return "No Kibana pod is in a running state, so there is no access to the logging UI."
+ elif not_running:
+ return (
+ "The following Kibana pods are not currently in a running state:\n"
+ "{pods}"
+ "However at least one is, so service may not be impacted."
+ ).format(pods="".join(" " + pod['metadata']['name'] + "\n" for pod in not_running))
+
+ return None
+
+ def _get_kibana_url(self, task_vars):
+ """
+ Get kibana route or report error.
+ Returns: url (or None), error key describing the failure
+ """
+
+ # Get logging url
+ get_route = self._exec_oc("get route logging-kibana -o json", [], task_vars)
+ if not get_route:
+ return None, 'no_route_exists'
+
+ route = json.loads(get_route)
+
+ # check that the route has been accepted by a router
+ ingress = route["status"]["ingress"]
+ # ingress can be null if there is no router, or empty if not routed
+ if not ingress or not ingress[0]:
+ return None, 'route_not_accepted'
+
+ host = route.get("spec", {}).get("host")
+ if not host:
+ return None, 'route_missing_host'
+
+ return 'https://{}/'.format(host), None
+
+ def _check_kibana_route(self, task_vars):
+ """
+ Check to see if kibana route is up and working.
+ Returns: error string
+ """
+ known_errors = dict(
+ no_route_exists=(
+ 'No route is defined for Kibana in the logging namespace,\n'
+ 'so the logging stack is not accessible. Is logging deployed?\n'
+ 'Did something remove the logging-kibana route?'
+ ),
+ route_not_accepted=(
+ 'The logging-kibana route is not being routed by any router.\n'
+ 'Is the router deployed and working?'
+ ),
+ route_missing_host=(
+ 'The logging-kibana route has no hostname defined,\n'
+ 'which should never happen. Did something alter its definition?'
+ ),
+ )
+
+ kibana_url, error = self._get_kibana_url(task_vars)
+ if not kibana_url:
+ return known_errors.get(error, error)
+
+ # first, check that kibana is reachable from the master.
+ error = self._verify_url_internal(kibana_url, task_vars)
+ if error:
+ if 'urlopen error [Errno 111] Connection refused' in error:
+ error = (
+ 'Failed to connect from this master to Kibana URL {url}\n'
+ 'Is kibana running, and is at least one router routing to it?'
+ ).format(url=kibana_url)
+ elif 'urlopen error [Errno -2] Name or service not known' in error:
+ error = (
+ 'Failed to connect from this master to Kibana URL {url}\n'
+ 'because the hostname does not resolve.\n'
+ 'Is DNS configured for the Kibana hostname?'
+ ).format(url=kibana_url)
+ elif 'Status code was not' in error:
+ error = (
+ 'A request from this master to the Kibana URL {url}\n'
+ 'did not return the correct status code (302).\n'
+ 'This could mean that Kibana is malfunctioning, the hostname is\n'
+ 'resolving incorrectly, or other network issues. The output was:\n'
+ ' {error}'
+ ).format(url=kibana_url, error=error)
+ return 'Error validating the logging Kibana route:\n' + error
+
+ # in production we would like the kibana route to work from outside the
+ # cluster too; but that may not be the case, so allow disabling just this part.
+ if not get_var(task_vars, "openshift_check_efk_kibana_external", default=True):
+ return None
+ error = self._verify_url_external(kibana_url)
+ if error:
+ if 'urlopen error [Errno 111] Connection refused' in error:
+ error = (
+ 'Failed to connect from the Ansible control host to Kibana URL {url}\n'
+ 'Is the router for the Kibana hostname exposed externally?'
+ ).format(url=kibana_url)
+ elif 'urlopen error [Errno -2] Name or service not known' in error:
+ error = (
+ 'Failed to resolve the Kibana hostname in {url}\n'
+ 'from the Ansible control host.\n'
+ 'Is DNS configured to resolve this Kibana hostname externally?'
+ ).format(url=kibana_url)
+ elif 'Expected success (200)' in error:
+ error = (
+ 'A request to Kibana at {url}\n'
+ 'returned the wrong error code:\n'
+ ' {error}\n'
+ 'This could mean that Kibana is malfunctioning, the hostname is\n'
+ 'resolving incorrectly, or other network issues.'
+ ).format(url=kibana_url, error=error)
+ error = (
+ 'Error validating the logging Kibana route:\n{error}\n'
+ 'To disable external Kibana route validation, set in your inventory:\n'
+ ' openshift_check_efk_kibana_external=False'
+ ).format(error=error)
+ return error
+ return None
+
+ def _exec_oc(self, cmd_str, extra_args, task_vars):
+ return super(Kibana, self).exec_oc(self.execute_module,
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ task_vars)
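The external check above boils down to an unverified-TLS `urlopen`; a minimal sketch of that pattern, using a hypothetical route hostname:

import ssl
try:
    import urllib2
except ImportError:
    import urllib.request as urllib2

# Internal CAs are common for the kibana route, so skip cert validation.
ctx = ssl.create_default_context()
ctx.check_hostname = False  # must be disabled before CERT_NONE is allowed
ctx.verify_mode = ssl.CERT_NONE

# Hypothetical URL; urlopen follows redirects, so 200 is the expected code.
code = urllib2.urlopen("https://kibana.example.com/", context=ctx).getcode()
print(code)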
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
new file mode 100644
index 000000000..05b4d300c
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -0,0 +1,96 @@
+"""
+Util functions for performing checks on an Elasticsearch, Fluentd, and Kibana stack
+"""
+
+import json
+import os
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+
+
+class LoggingCheck(OpenShiftCheck):
+ """Base class for logging component checks"""
+
+ name = "logging"
+
+ @classmethod
+ def is_active(cls, task_vars):
+ return super(LoggingCheck, cls).is_active(task_vars) and cls.is_first_master(task_vars)
+
+ @staticmethod
+ def is_first_master(task_vars):
+ """Run only on first master and only when logging is configured. Returns: bool"""
+ logging_deployed = get_var(task_vars, "openshift_hosted_logging_deploy", default=True)
+ # Note: It would be nice to use membership in oo_first_master group, however for now it
+ # seems best to avoid requiring that setup and just check this is the first master.
+ hostname = get_var(task_vars, "ansible_ssh_host") or [None]
+ masters = get_var(task_vars, "groups", "masters", default=None) or [None]
+ return logging_deployed and masters[0] == hostname
+
+ def run(self, tmp, task_vars):
+ """Subclasses implement the actual check; the base class is a no-op."""
+ pass
+
+ def get_pods_for_component(self, execute_module, namespace, logging_component, task_vars):
+ """Get all pods for a given component. Returns: list of pods for component, error string"""
+ pod_output = self.exec_oc(
+ execute_module,
+ namespace,
+ "get pods -l component={} -o json".format(logging_component),
+ [],
+ task_vars
+ )
+ try:
+ pods = json.loads(pod_output)
+ if not pods or not pods.get('items'):
+ raise ValueError()
+ except ValueError:
+ # a successful run whose output does not parse generally means there were no pods in the namespace
+ return None, 'There are no pods in the {} namespace. Is logging deployed?'.format(namespace)
+
+ return pods['items'], None
+
+ @staticmethod
+ def not_running_pods(pods):
+ """Returns: list of pods not in a ready and running state"""
+ return [
+ pod for pod in pods
+ if any(
+ container['ready'] is False
+ for container in pod['status']['containerStatuses']
+ ) or not any(
+ condition['type'] == 'Ready' and condition['status'] == 'True'
+ for condition in pod['status']['conditions']
+ )
+ ]
+
+ @staticmethod
+ def exec_oc(execute_module=None, namespace="logging", cmd_str="", extra_args=None, task_vars=None):
+ """
+ Execute an 'oc' command on the remote host in the given namespace.
+ Returns: the command output as a string,
+ or raises OpenShiftCheckException on error
+ """
+ config_base = get_var(task_vars, "openshift", "common", "config_base")
+ args = {
+ "namespace": namespace,
+ "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
+ "cmd": cmd_str,
+ "extra_args": list(extra_args) if extra_args else [],
+ }
+
+ result = execute_module("ocutil", args, task_vars)
+ if result.get("failed"):
+ msg = (
+ 'Unexpected error using `oc` to validate the logging stack components.\n'
+ 'Error executing `oc {cmd}`:\n'
+ '{error}'
+ ).format(cmd=args['cmd'], error=result['result'])
+
+ if result['result'] == '[Errno 2] No such file or directory':
+ msg = (
+ "This host is supposed to be a master but does not have the `oc` command where expected.\n"
+ "Has an installation been run on this host yet?"
+ )
+ raise OpenShiftCheckException(msg)
+
+ return result.get("result", "")
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 20d160eaf..2cb2e21aa 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,4 +1,3 @@
-# pylint: disable=missing-docstring,too-few-public-methods
"""
Mixin classes meant to be used with subclasses of OpenShiftCheck.
"""
@@ -8,8 +7,52 @@ from openshift_checks import get_var
class NotContainerizedMixin(object):
"""Mixin for checks that are only active when not in containerized mode."""
+ # permanent # pylint: disable=too-few-public-methods
+ # Reason: The mixin is not intended to stand on its own as a class.
@classmethod
def is_active(cls, task_vars):
+ """Only run on non-containerized hosts."""
is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
+
+
+class DockerHostMixin(object):
+ """Mixin for checks that are only active on hosts that require Docker."""
+
+ dependencies = []
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Only run on hosts that depend on Docker."""
+ is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+ is_node = "nodes" in get_var(task_vars, "group_names", default=[])
+ return super(DockerHostMixin, cls).is_active(task_vars) and (is_containerized or is_node)
+
+ def ensure_dependencies(self, task_vars):
+ """
+ Ensure that docker-related packages exist, but not on atomic hosts
+ (which would not be able to install but should already have them).
+ Returns: msg, failed, changed
+ """
+ if get_var(task_vars, "openshift", "common", "is_atomic"):
+ return "", False, False
+
+ # NOTE: we would use the "package" module but it's actually an action plugin
+ # and it's not clear how to invoke one of those. This is about the same anyway:
+ result = self.execute_module(
+ get_var(task_vars, "ansible_pkg_mgr", default="yum"),
+ {"name": self.dependencies, "state": "present"},
+ task_vars=task_vars,
+ )
+ msg = result.get("msg", "")
+ if result.get("failed"):
+ if "No package matching" in msg:
+ msg = "Ensure that all required dependencies can be installed via `yum`.\n"
+ msg = (
+ "Unable to install required packages on this host:\n"
+ " {deps}\n{msg}"
+ ).format(deps=',\n '.join(self.dependencies), msg=msg)
+ failed = result.get("failed", False) or result.get("rc", 0) != 0
+ changed = result.get("changed", False)
+ return msg, failed, changed
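The activation rule of the new mixin, restated in isolation with hypothetical task_vars (containerized hosts and node hosts need Docker; others do not):

task_vars = {
    "openshift": {"common": {"is_containerized": False}},
    "group_names": ["nodes"],
}
is_containerized = task_vars["openshift"]["common"]["is_containerized"]
is_node = "nodes" in task_vars["group_names"]
assert is_containerized or is_node  # active: this host requires Docker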
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
new file mode 100644
index 000000000..2dd045f1f
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -0,0 +1,78 @@
+"""
+Ansible module for determining if an installed version of Open vSwitch is incompatible with the
+currently installed version of OpenShift.
+"""
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import NotContainerizedMixin
+
+
+class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
+ """Check that packages in a package_list are installed on the host
+ and are the correct version as determined by an OpenShift installation.
+ """
+
+ name = "ovs_version"
+ tags = ["health"]
+
+ openshift_to_ovs_version = {
+ "3.6": "2.6",
+ "3.5": "2.6",
+ "3.4": "2.4",
+ }
+
+ # map major release versions across releases
+ # to a common major version
+ openshift_major_release_version = {
+ "1": "3",
+ }
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have package requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ master_or_node = 'masters' in group_names or 'nodes' in group_names
+ return super(OvsVersion, cls).is_active(task_vars) and master_or_node
+
+ def run(self, tmp, task_vars):
+ args = {
+ "package_list": [
+ {
+ "name": "openvswitch",
+ "version": self.get_required_ovs_version(task_vars),
+ },
+ ],
+ }
+ return self.execute_module("rpm_version", args, task_vars=task_vars)
+
+ def get_required_ovs_version(self, task_vars):
+ """Return the correct Open vSwitch version for the current OpenShift version"""
+ openshift_version = self._get_openshift_version(task_vars)
+
+ if float(openshift_version) < 3.5:
+ return self.openshift_to_ovs_version["3.4"]
+
+ ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
+ if ovs_version:
+ return ovs_version
+
+ msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
+ raise OpenShiftCheckException(msg.format(openshift_version))
+
+ def _get_openshift_version(self, task_vars):
+ openshift_version = get_var(task_vars, "openshift_image_tag")
+ if openshift_version and openshift_version[0] == 'v':
+ openshift_version = openshift_version[1:]
+
+ return self._parse_version(openshift_version)
+
+ def _parse_version(self, version):
+ components = version.split(".")
+ if not components or len(components) < 2:
+ msg = "An invalid version of OpenShift was found for this host: {}"
+ raise OpenShiftCheckException(msg.format(version))
+
+ if components[0] in self.openshift_major_release_version:
+ components[0] = self.openshift_major_release_version[components[0]]
+
+ return '.'.join(components[:2])
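How `_parse_version` normalizes image tags, sketched with the same dummy-constructor convention the unit tests use (the constructor argument only matters if a module is actually invoked):

from openshift_checks.ovs_version import OvsVersion

check = OvsVersion("dummy")
assert check._parse_version("3.5.0.17") == "3.5"
assert check._parse_version("1.5.1") == "3.5"  # origin "1" maps to "3"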
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index a7eb720fd..e87567fe6 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -25,7 +25,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.execute_module("check_yum_update", args, tmp, task_vars)
+ return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars)
@staticmethod
def master_packages(rpm_prefix):
@@ -36,7 +36,6 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
"bash-completion",
"cockpit-bridge",
"cockpit-docker",
- "cockpit-kubernetes",
"cockpit-shell",
"cockpit-ws",
"etcd",
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index fd0c0a755..f432380c6 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
def run(self, tmp, task_vars):
args = {"packages": []}
- return self.execute_module("check_yum_update", args, tmp, task_vars)
+ return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 682f6bd40..6a76bb93d 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -1,5 +1,5 @@
# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
from openshift_checks.mixins import NotContainerizedMixin
@@ -9,6 +9,25 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
name = "package_version"
tags = ["preflight"]
+ openshift_to_ovs_version = {
+ "3.6": "2.6",
+ "3.5": "2.6",
+ "3.4": "2.4",
+ }
+
+ openshift_to_docker_version = {
+ "3.1": "1.8",
+ "3.2": "1.10",
+ "3.3": "1.10",
+ "3.4": "1.12",
+ }
+
+ # map major release versions across releases
+ # to a common major version
+ openshift_major_release_version = {
+ "1": "3",
+ }
+
@classmethod
def is_active(cls, task_vars):
"""Skip hosts that do not have package requirements."""
@@ -17,9 +36,90 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
return super(PackageVersion, cls).is_active(task_vars) and master_or_node
def run(self, tmp, task_vars):
+ rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
+ openshift_release = get_var(task_vars, "openshift_release", default='')
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ check_multi_minor_release = deployment_type in ['openshift-enterprise']
+
args = {
- "requested_openshift_release": get_var(task_vars, "openshift_release", default=''),
- "openshift_deployment_type": get_var(task_vars, "openshift_deployment_type"),
- "rpm_prefix": get_var(task_vars, "openshift", "common", "service_type"),
+ "package_list": [
+ {
+ "name": "openvswitch",
+ "version": self.get_required_ovs_version(task_vars),
+ "check_multi": False,
+ },
+ {
+ "name": "docker",
+ "version": self.get_required_docker_version(task_vars),
+ "check_multi": False,
+ },
+ {
+ "name": "{}".format(rpm_prefix),
+ "version": openshift_release,
+ "check_multi": check_multi_minor_release,
+ },
+ {
+ "name": "{}-master".format(rpm_prefix),
+ "version": openshift_release,
+ "check_multi": check_multi_minor_release,
+ },
+ {
+ "name": "{}-node".format(rpm_prefix),
+ "version": openshift_release,
+ "check_multi": check_multi_minor_release,
+ },
+ ],
}
- return self.execute_module("aos_version", args, tmp, task_vars)
+
+ return self.execute_module("aos_version", args, tmp=tmp, task_vars=task_vars)
+
+ def get_required_ovs_version(self, task_vars):
+ """Return the correct Open vSwitch version for the current OpenShift version.
+ OpenShift versions before 3.5 require Open vSwitch 2.4;
+ versions 3.5 and later require Open vSwitch 2.6."""
+ openshift_version = self.get_openshift_version(task_vars)
+
+ if float(openshift_version) < 3.5:
+ return self.openshift_to_ovs_version["3.4"]
+
+ ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
+ if ovs_version:
+ return ovs_version
+
+ msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
+ raise OpenShiftCheckException(msg.format(openshift_version))
+
+ def get_required_docker_version(self, task_vars):
+ """Return the correct Docker version for the current OpenShift version.
+ If the OpenShift version is 3.1, ensure Docker version 1.8.
+ If the OpenShift version is 3.2 or 3.3, ensure Docker version 1.10.
+ If the current OpenShift version is >= 3.4, ensure Docker version 1.12."""
+ openshift_version = self.get_openshift_version(task_vars)
+
+ if float(openshift_version) >= 3.4:
+ return self.openshift_to_docker_version["3.4"]
+
+ docker_version = self.openshift_to_docker_version.get(str(openshift_version))
+ if docker_version:
+ return docker_version
+
+ msg = "There is no recommended version of Docker for the current version of OpenShift: {}"
+ raise OpenShiftCheckException(msg.format(openshift_version))
+
+ def get_openshift_version(self, task_vars):
+ openshift_version = get_var(task_vars, "openshift_image_tag")
+ if openshift_version and openshift_version[0] == 'v':
+ openshift_version = openshift_version[1:]
+
+ return self.parse_version(openshift_version)
+
+ def parse_version(self, version):
+ components = version.split(".")
+ if not components or len(components) < 2:
+ msg = "An invalid version of OpenShift was found for this host: {}"
+ raise OpenShiftCheckException(msg.format(version))
+
+ if components[0] in self.openshift_major_release_version:
+ components[0] = self.openshift_major_release_version[components[0]]
+
+ return '.'.join(components[:2])
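For reference, the shape of the `package_list` argument the check now passes to the `aos_version` module, assuming OpenShift 3.4 on an enterprise deployment (names and versions are illustrative, derived from the mappings above):

args = {"package_list": [
    {"name": "openvswitch", "version": "2.4", "check_multi": False},
    {"name": "docker", "version": "1.12", "check_multi": False},
    # rpm_prefix comes from openshift.common.service_type,
    # e.g. "atomic-openshift" on an enterprise deployment:
    {"name": "atomic-openshift", "version": "3.4", "check_multi": True},
    {"name": "atomic-openshift-master", "version": "3.4", "check_multi": True},
    {"name": "atomic-openshift-node", "version": "3.4", "check_multi": True},
]}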
diff --git a/roles/openshift_health_checker/test/aos_version_test.py b/roles/openshift_health_checker/test/aos_version_test.py
index 39c86067a..697805dd2 100644
--- a/roles/openshift_health_checker/test/aos_version_test.py
+++ b/roles/openshift_health_checker/test/aos_version_test.py
@@ -4,89 +4,118 @@ import aos_version
from collections import namedtuple
Package = namedtuple('Package', ['name', 'version'])
-expected_pkgs = set(['spam', 'eggs'])
+expected_pkgs = {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ "check_multi": False,
+ },
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ },
+}
-@pytest.mark.parametrize('pkgs, requested_release, expect_not_found', [
+@pytest.mark.parametrize('pkgs, expect_not_found', [
(
[],
- '3.2.1',
- expected_pkgs, # none found
+ {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ "check_multi": False,
+ },
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # none found
),
(
[Package('spam', '3.2.1')],
- '3.2',
- ['eggs'], # completely missing
+ {
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # completely missing
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
- '3.2',
- ['eggs'], # not the right version
+ {
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # not the right version
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
- '3.2',
- [], # all found
+ {}, # all found
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
- '3.2.1',
- [], # found with more specific version
+ {}, # found with more specific version
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
- '3.2.1',
- ['spam'], # eggs found with multiple versions
+ {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # eggs found with multiple versions
),
])
-def test_check_pkgs_for_precise_version(pkgs, requested_release, expect_not_found):
+def test_check_pkgs_for_precise_version(pkgs, expect_not_found):
if expect_not_found:
with pytest.raises(aos_version.PreciseVersionNotFound) as e:
- aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release)
- assert set(expect_not_found) == set(e.value.problem_pkgs)
+ aos_version._check_precise_version_found(pkgs, expected_pkgs)
+
+ assert list(expect_not_found.values()) == e.value.problem_pkgs
else:
- aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release)
+ aos_version._check_precise_version_found(pkgs, expected_pkgs)
-@pytest.mark.parametrize('pkgs, requested_release, expect_higher', [
+@pytest.mark.parametrize('pkgs, expect_higher', [
(
[],
- '3.2.1',
[],
),
(
- [Package('spam', '3.2.1')],
- '3.2',
+ [Package('spam', '3.2.1.9')],
[], # more precise but not strictly higher
),
(
[Package('spam', '3.3')],
- '3.2.1',
['spam-3.3'], # lower precision, but higher
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
- '3.2',
['eggs-3.3.2'], # one too high
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
- '3.2.1',
['eggs-3.4'], # multiple versions, one is higher
),
(
[Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
- '3.2.1',
['eggs-3.4'], # multiple versions, two are higher
),
])
-def test_check_pkgs_for_greater_version(pkgs, requested_release, expect_higher):
+def test_check_pkgs_for_greater_version(pkgs, expect_higher):
if expect_higher:
with pytest.raises(aos_version.FoundHigherVersion) as e:
- aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release)
+ aos_version._check_higher_version_found(pkgs, expected_pkgs)
assert set(expect_higher) == set(e.value.problem_pkgs)
else:
- aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release)
+ aos_version._check_higher_version_found(pkgs, expected_pkgs)
@pytest.mark.parametrize('pkgs, expect_to_flag_pkgs', [
diff --git a/roles/openshift_health_checker/test/curator_test.py b/roles/openshift_health_checker/test/curator_test.py
new file mode 100644
index 000000000..ae108c96e
--- /dev/null
+++ b/roles/openshift_health_checker/test/curator_test.py
@@ -0,0 +1,68 @@
+import pytest
+
+from openshift_checks.logging.curator import Curator
+
+
+def canned_curator(exec_oc=None):
+ """Create a Curator check object with canned exec_oc method"""
+ check = Curator("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+not_running_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": False}],
+ "conditions": [{"status": "False", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ "no Curator pods",
+ ),
+ (
+ [plain_curator_pod],
+ None,
+ ),
+ (
+ [not_running_curator_pod],
+ "not currently in a running state",
+ ),
+ (
+ [plain_curator_pod, plain_curator_pod],
+ "more than one Curator pod",
+ ),
+])
+def test_get_curator_pods(pods, expect_error):
+ check = canned_curator()
+ error = check.check_curator(pods)
+ assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 0379cafb5..0a7c0f8d3 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,19 +3,25 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
-@pytest.mark.parametrize('deployment_type,is_active', [
- ("origin", True),
- ("openshift-enterprise", True),
- ("enterprise", False),
- ("online", False),
- ("invalid", False),
- ("", False),
+@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+ ("origin", True, [], True),
+ ("openshift-enterprise", True, [], True),
+ ("enterprise", True, [], False),
+ ("online", True, [], False),
+ ("invalid", True, [], False),
+ ("", True, [], False),
+ ("origin", False, [], False),
+ ("openshift-enterprise", False, [], False),
+ ("origin", False, ["nodes", "masters"], True),
+ ("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_active):
+def test_is_active(deployment_type, is_containerized, group_names, expect_active):
task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
openshift_deployment_type=deployment_type,
+ group_names=group_names,
)
- assert DockerImageAvailability.is_active(task_vars=task_vars) == is_active
+ assert DockerImageAvailability.is_active(task_vars=task_vars) == expect_active
@pytest.mark.parametrize("is_containerized,is_atomic", [
@@ -25,15 +31,15 @@ def test_is_active(deployment_type, is_active):
(False, True),
])
def test_all_images_available_locally(is_containerized, is_atomic):
- def execute_module(module_name, args, task_vars):
+ def execute_module(module_name, module_args, task_vars):
if module_name == "yum":
return {"changed": True}
assert module_name == "docker_image_facts"
- assert 'name' in args
- assert args['name']
+ assert 'name' in module_args
+ assert module_args['name']
return {
- 'images': [args['name']],
+ 'images': [module_args['name']],
}
result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
@@ -46,8 +52,8 @@ def test_all_images_available_locally(is_containerized, is_atomic):
docker=dict(additional_registries=["docker.io"]),
),
openshift_deployment_type='origin',
- openshift_release='v3.4',
openshift_image_tag='3.4',
+ group_names=['nodes', 'masters'],
))
assert not result.get('failed', False)
@@ -58,7 +64,7 @@ def test_all_images_available_locally(is_containerized, is_atomic):
True,
])
def test_all_images_available_remotely(available_locally):
- def execute_module(module_name, args, task_vars):
+ def execute_module(module_name, module_args, task_vars):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
return {'changed': False}
@@ -73,8 +79,8 @@ def test_all_images_available_remotely(available_locally):
docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
),
openshift_deployment_type='origin',
- openshift_release='3.4',
openshift_image_tag='v3.4',
+ group_names=['nodes', 'masters'],
))
assert not result.get('failed', False)
@@ -102,8 +108,8 @@ def test_all_images_unavailable():
docker=dict(additional_registries=["docker.io"]),
),
openshift_deployment_type="openshift-enterprise",
- openshift_release=None,
- openshift_image_tag='latest'
+ openshift_image_tag='latest',
+ group_names=['nodes', 'masters'],
))
assert actual['failed']
@@ -141,8 +147,8 @@ def test_skopeo_update_failure(message, extra_words):
docker=dict(additional_registries=["unknown.io"]),
),
openshift_deployment_type="openshift-enterprise",
- openshift_release='',
openshift_image_tag='',
+ group_names=['nodes', 'masters'],
))
assert actual["failed"]
@@ -171,8 +177,85 @@ def test_registry_availability(deployment_type, registries):
docker=dict(additional_registries=registries),
),
openshift_deployment_type=deployment_type,
- openshift_release='',
openshift_image_tag='',
+ group_names=['nodes', 'masters'],
))
assert not actual.get("failed", False)
+
+
+@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
+ ( # standard set of stuff required on nodes
+ "origin", False, ['nodes'], None,
+ set([
+ 'openshift/origin-pod:vtest',
+ 'openshift/origin-deployer:vtest',
+ 'openshift/origin-docker-registry:vtest',
+ 'openshift/origin-haproxy-router:vtest',
+ 'cockpit/kubernetes', # origin version of registry-console
+ ])
+ ),
+ ( # set a different URL for images
+ "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}',
+ set([
+ 'foo.io/openshift/origin-pod:vtest',
+ 'foo.io/openshift/origin-deployer:vtest',
+ 'foo.io/openshift/origin-docker-registry:vtest',
+ 'foo.io/openshift/origin-haproxy-router:vtest',
+ 'cockpit/kubernetes', # AFAICS this is not built from the URL
+ ])
+ ),
+ (
+ "origin", True, ['nodes', 'masters', 'etcd'], None,
+ set([
+ # images running on top of openshift
+ 'openshift/origin-pod:vtest',
+ 'openshift/origin-deployer:vtest',
+ 'openshift/origin-docker-registry:vtest',
+ 'openshift/origin-haproxy-router:vtest',
+ 'cockpit/kubernetes',
+ # containerized component images
+ 'openshift/origin:vtest',
+ 'openshift/node:vtest',
+ 'openshift/openvswitch:vtest',
+ 'registry.access.redhat.com/rhel7/etcd',
+ ])
+ ),
+ ( # enterprise images
+ "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ set([
+ 'foo.io/openshift3/ose-pod:f13ac45',
+ 'foo.io/openshift3/ose-deployer:f13ac45',
+ 'foo.io/openshift3/ose-docker-registry:f13ac45',
+ 'foo.io/openshift3/ose-haproxy-router:f13ac45',
+ # registry-console is not constructed/versioned the same as the others.
+ 'registry.access.redhat.com/openshift3/registry-console',
+ # containerized images aren't built from oreg_url
+ 'openshift3/node:vtest',
+ 'openshift3/openvswitch:vtest',
+ ])
+ ),
+ (
+ "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ set([
+ 'registry.access.redhat.com/rhel7/etcd',
+ # lb does not yet come in a containerized version
+ ])
+ ),
+
+])
+def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
+ task_vars = dict(
+ openshift=dict(
+ common=dict(
+ is_containerized=is_containerized,
+ is_atomic=False,
+ ),
+ ),
+ openshift_deployment_type=deployment_type,
+ group_names=groups,
+ oreg_url=oreg_url,
+ openshift_image_tag='vtest',
+ )
+
+ assert expected == DockerImageAvailability("DUMMY").required_images(task_vars)
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
new file mode 100644
index 000000000..876614b1d
--- /dev/null
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -0,0 +1,224 @@
+import pytest
+
+from openshift_checks import OpenShiftCheckException
+from openshift_checks.docker_storage import DockerStorage
+
+
+def dummy_check(execute_module=None):
+ def dummy_exec(self, status, task_vars):
+ raise Exception("dummy executor called")
+ return DockerStorage(execute_module=execute_module or dummy_exec)
+
+
+@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+ (False, ["masters", "etcd"], False),
+ (False, ["masters", "nodes"], True),
+ (True, ["etcd"], True),
+])
+def test_is_active(is_containerized, group_names, is_active):
+ task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ group_names=group_names,
+ )
+ assert DockerStorage.is_active(task_vars=task_vars) == is_active
+
+
+non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
+
+
+@pytest.mark.parametrize('docker_info, failed, expect_msg', [
+ (
+ dict(failed=True, msg="Error connecting: Error while fetching server API version"),
+ True,
+ ["Is docker running on this host?"],
+ ),
+ (
+ dict(msg="I have no info"),
+ True,
+ ["missing info"],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Pool Name", "docker-docker--pool")],
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Data loop file", "true")],
+ }),
+ True,
+ ["loopback devices with the Docker devicemapper storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay2",
+ "DriverStatus": []
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "unsupported",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+])
+def test_check_storage_driver(docker_info, failed, expect_msg):
+ def execute_module(module_name, module_args, tmp=None, task_vars=None):
+ if module_name == "yum":
+ return {}
+ if module_name != "docker_info":
+ raise ValueError("not expecting module " + module_name)
+ return docker_info
+
+ check = dummy_check(execute_module=execute_module)
+ check._check_dm_usage = lambda status, task_vars: dict() # stub out for this test
+ result = check.run(tmp=None, task_vars=non_atomic_task_vars)
+
+ if failed:
+ assert result["failed"]
+ else:
+ assert not result.get("failed", False)
+
+ for word in expect_msg:
+ assert word in result["msg"]
+
+
+enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "19.92 MB",
+ "Data Space Total": "8.535 GB",
+ "Metadata Space Used": "40.96 kB",
+ "Metadata Space Total": "25.17 MB",
+}
+
+not_enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "10 GB",
+ "Data Space Total": "10 GB",
+ "Metadata Space Used": "42 kB",
+ "Metadata Space Total": "43 kB",
+}
+
+
+@pytest.mark.parametrize('task_vars, driver_status, vg_free, success, expect_msg', [
+ (
+ {"max_thinpool_data_usage_percent": "not a float"},
+ enough_space,
+ "12g",
+ False,
+ ["is not a percentage"],
+ ),
+ (
+ {},
+ {}, # empty values from driver status
+ "bogus", # also does not parse as bytes
+ False,
+ ["Could not interpret", "as bytes"],
+ ),
+ (
+ {},
+ enough_space,
+ "12.00g",
+ True,
+ [],
+ ),
+ (
+ {},
+ not_enough_space,
+ "0.00",
+ False,
+ ["data usage", "metadata usage", "higher than threshold"],
+ ),
+])
+def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
+ check = dummy_check()
+ check._get_vg_free = lambda pool, task_vars: vg_free
+ result = check._check_dm_usage(driver_status, task_vars)
+ result_success = not result.get("failed")
+
+ assert result_success is success
+ for msg in expect_msg:
+ assert msg in result["msg"]
+
+
+@pytest.mark.parametrize('pool, command_returns, raises, returns', [
+ (
+ "foo-bar",
+ { # vgs missing
+ "msg": "[Errno 2] No such file or directory",
+ "failed": True,
+ "cmd": "/sbin/vgs",
+ "rc": 2,
+ },
+ "Failed to run /sbin/vgs",
+ None,
+ ),
+ (
+ "foo", # no hyphen in name - should not happen
+ {},
+ "name does not have the expected format",
+ None,
+ ),
+ (
+ "foo-bar",
+ dict(stdout=" 4.00g\n"),
+ None,
+ "4.00g",
+ ),
+ (
+ "foo-bar",
+ dict(stdout="\n"), # no matching VG
+ "vgs did not find this VG",
+ None,
+ )
+])
+def test_vg_free(pool, command_returns, raises, returns):
+ def execute_module(module_name, module_args, tmp=None, task_vars=None):
+ if module_name != "command":
+ raise ValueError("not expecting module " + module_name)
+ return command_returns
+
+ check = dummy_check(execute_module=execute_module)
+ if raises:
+ with pytest.raises(OpenShiftCheckException) as err:
+ check._get_vg_free(pool, {})
+ assert raises in str(err.value)
+ else:
+ ret = check._get_vg_free(pool, {})
+ assert ret == returns
+
+
+@pytest.mark.parametrize('string, expect_bytes', [
+ ("12", 12.0),
+ ("12 k", 12.0 * 1024),
+ ("42.42 MB", 42.42 * 1024**2),
+ ("12g", 12.0 * 1024**3),
+])
+def test_convert_to_bytes(string, expect_bytes):
+ got = DockerStorage._convert_to_bytes(string)
+ assert got == expect_bytes
+
+
+@pytest.mark.parametrize('string', [
+ "bork",
+ "42 Qs",
+])
+def test_convert_to_bytes_error(string):
+ with pytest.raises(ValueError) as err:
+ DockerStorage._convert_to_bytes(string)
+ assert "Cannot convert" in str(err.value)
+ assert string in str(err.value)
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
new file mode 100644
index 000000000..b9d375d8c
--- /dev/null
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -0,0 +1,180 @@
+import pytest
+import json
+
+from openshift_checks.logging.elasticsearch import Elasticsearch
+
+task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
+
+
+def canned_elasticsearch(exec_oc=None):
+ """Create an Elasticsearch check object with canned exec_oc method"""
+ check = Elasticsearch("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es"},
+ "name": "logging-es",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es",
+}
+
+split_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es-2"},
+ "name": "logging-es-2",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es-2",
+}
+
+
+def test_check_elasticsearch():
+ assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([], {})
+
+ # canned oc responses to match so all the checks pass
+ def _exec_oc(cmd, args, task_vars):
+ if '_cat/master' in cmd:
+ return 'name logging-es'
+ elif '/_nodes' in cmd:
+ return json.dumps(es_node_list)
+ elif '_cluster/health' in cmd:
+ return '{"status": "green"}'
+ elif ' df ' in cmd:
+ return 'IUse% Use%\n 3% 4%\n'
+ else:
+ raise Exception(cmd)
+
+ assert not canned_elasticsearch(_exec_oc).check_elasticsearch([plain_es_pod], {})
+
+
+def pods_by_name(pods):
+ return {pod['metadata']['name']: pod for pod in pods}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ 'No logging Elasticsearch masters',
+ ),
+ (
+ [plain_es_pod],
+ None,
+ ),
+ (
+ [plain_es_pod, split_es_pod],
+ 'Found multiple Elasticsearch masters',
+ ),
+])
+def test_check_elasticsearch_masters(pods, expect_error):
+ test_pods = list(pods)
+ check = canned_elasticsearch(lambda cmd, args, task_vars: test_pods.pop(0)['_test_master_name_str'])
+
+ errors = check._check_elasticsearch_masters(pods_by_name(pods), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
+
+
+es_node_list = {
+ 'nodes': {
+ 'random-es-name': {
+ 'host': 'logging-es',
+ }}}
+
+
+@pytest.mark.parametrize('pods, node_list, expect_error', [
+ (
+ [],
+ {},
+ 'No logging Elasticsearch masters',
+ ),
+ (
+ [plain_es_pod],
+ es_node_list,
+ None,
+ ),
+ (
+ [plain_es_pod],
+ {}, # empty list of nodes triggers KeyError
+ "Failed to query",
+ ),
+ (
+ [split_es_pod],
+ es_node_list,
+ 'does not correspond to any known ES pod',
+ ),
+])
+def test_check_elasticsearch_node_list(pods, node_list, expect_error):
+ check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(node_list))
+
+ errors = check._check_elasticsearch_node_list(pods_by_name(pods), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
+
+
+@pytest.mark.parametrize('pods, health_data, expect_error', [
+ (
+ [plain_es_pod],
+ [{"status": "green"}],
+ None,
+ ),
+ (
+ [plain_es_pod],
+ [{"no-status": "should bomb"}],
+ 'Could not retrieve cluster health status',
+ ),
+ (
+ [plain_es_pod, split_es_pod],
+ [{"status": "green"}, {"status": "red"}],
+ 'Elasticsearch cluster health status is RED',
+ ),
+])
+def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
+ test_health_data = list(health_data)
+ check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(test_health_data.pop(0)))
+
+ errors = check._check_es_cluster_health(pods_by_name(pods), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
+
+
+@pytest.mark.parametrize('disk_data, expect_error', [
+ (
+ 'df: /elasticsearch/persistent: No such file or directory\n',
+ 'Could not retrieve storage usage',
+ ),
+ (
+ 'IUse% Use%\n 3% 4%\n',
+ None,
+ ),
+ (
+ 'IUse% Use%\n 95% 40%\n',
+ 'Inode percent usage on the storage volume',
+ ),
+ (
+ 'IUse% Use%\n 3% 94%\n',
+ 'Disk percent usage on the storage volume',
+ ),
+])
+def test_check_elasticsearch_diskspace(disk_data, expect_error):
+ check = canned_elasticsearch(lambda cmd, args, task_vars: disk_data)
+
+ errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
diff --git a/roles/openshift_health_checker/test/fluentd_test.py b/roles/openshift_health_checker/test/fluentd_test.py
new file mode 100644
index 000000000..d151c0b19
--- /dev/null
+++ b/roles/openshift_health_checker/test/fluentd_test.py
@@ -0,0 +1,109 @@
+import pytest
+import json
+
+from openshift_checks.logging.fluentd import Fluentd
+
+
+def canned_fluentd(exec_oc=None):
+ """Create a Fluentd check object with canned exec_oc method"""
+ check = Fluentd("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+fluentd_pod_node1 = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {"host": "node1", "nodeName": "node1"},
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+fluentd_pod_node2_down = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-2",
+ },
+ "spec": {"host": "node2", "nodeName": "node2"},
+ "status": {
+ "containerStatuses": [{"ready": False}],
+ "conditions": [{"status": "False", "type": "Ready"}],
+ }
+}
+fluentd_node1 = {
+ "metadata": {
+ "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "node1"},
+ "name": "node1",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.1"}]},
+}
+fluentd_node2 = {
+ "metadata": {
+ "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "hostname"},
+ "name": "node2",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.2"}]},
+}
+fluentd_node3_unlabeled = {
+ "metadata": {
+ "labels": {"kubernetes.io/hostname": "hostname"},
+ "name": "node3",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.3"}]},
+}
+
+
+@pytest.mark.parametrize('pods, nodes, expect_error', [
+ (
+ [],
+ [],
+ 'No nodes appear to be defined',
+ ),
+ (
+ [],
+ [fluentd_node3_unlabeled],
+ 'There are no nodes with the fluentd label',
+ ),
+ (
+ [],
+ [fluentd_node1, fluentd_node3_unlabeled],
+ 'Fluentd will not aggregate logs from these nodes.',
+ ),
+ (
+ [],
+ [fluentd_node2],
+ "nodes are supposed to have a Fluentd pod but do not",
+ ),
+ (
+ [fluentd_pod_node1, fluentd_pod_node1],
+ [fluentd_node1],
+ 'more Fluentd pods running than nodes labeled',
+ ),
+ (
+ [fluentd_pod_node2_down],
+ [fluentd_node2],
+ "Fluentd pods are supposed to be running",
+ ),
+ (
+ [fluentd_pod_node1],
+ [fluentd_node1],
+ None,
+ ),
+])
+def test_get_fluentd_pods(pods, nodes, expect_error):
+ check = canned_fluentd(lambda cmd, args, task_vars: json.dumps(dict(items=nodes)))
+
+ error = check.check_fluentd(pods, {})
+ assert_error(error, expect_error)
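
The seven cases above pin down an ordering of failure modes: no nodes at all, no labeled nodes, a mix of labeled and unlabeled nodes, labeled nodes missing pods, more pods than labeled nodes, and pods that exist but are not ready. A condensed sketch of that ordering against the fixtures above (messages and structure are illustrative; the real check builds richer error text):

    def check_fluentd_sketch(pods, nodes):
        if not nodes:
            return 'No nodes appear to be defined'
        labeled = [n for n in nodes
                   if n['metadata']['labels'].get('logging-infra-fluentd') == 'true']
        if not labeled:
            return 'There are no nodes with the fluentd label'
        if len(labeled) < len(nodes):
            return 'Fluentd will not aggregate logs from these nodes.'
        pods_on = set(pod['spec']['nodeName'] for pod in pods)
        if any(n['metadata']['name'] not in pods_on for n in labeled):
            return 'These nodes are supposed to have a Fluentd pod but do not'
        if len(pods) > len(labeled):
            return 'There are more Fluentd pods running than nodes labeled'
        not_ready = [p for p in pods
                     if not all(c['ready'] for c in p['status']['containerStatuses'])]
        if not_ready:
            return 'These Fluentd pods are supposed to be running but are not'
        return None
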
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
new file mode 100644
index 000000000..19140a1b6
--- /dev/null
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -0,0 +1,218 @@
+import pytest
+import json
+
+try:
+ import urllib2
+ from urllib2 import HTTPError, URLError
+except ImportError:
+ from urllib.error import HTTPError, URLError
+ import urllib.request as urllib2
+
+from openshift_checks.logging.kibana import Kibana
+
+
+def canned_kibana(exec_oc=None):
+ """Create a Kibana check object with canned exec_oc method"""
+ check = Kibana("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+not_running_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": False}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ "There are no Kibana pods deployed",
+ ),
+ (
+ [plain_kibana_pod],
+ None,
+ ),
+ (
+ [not_running_kibana_pod],
+ "No Kibana pod is in a running state",
+ ),
+ (
+ [plain_kibana_pod, not_running_kibana_pod],
+ "The following Kibana pods are not currently in a running state",
+ ),
+])
+def test_check_kibana(pods, expect_error):
+ check = canned_kibana()
+ error = check.check_kibana(pods)
+ assert_error(error, expect_error)
+
+
+@pytest.mark.parametrize('route, expect_url, expect_error', [
+ (
+ None,
+ None,
+ 'no_route_exists',
+ ),
+
+ # test route with no ingress
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [],
+ },
+ "spec": {
+ "host": "hostname",
+ }
+ },
+ None,
+ 'route_not_accepted',
+ ),
+
+ # test route with no host
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [{
+ "status": True,
+ }],
+ },
+ "spec": {},
+ },
+ None,
+ 'route_missing_host',
+ ),
+
+ # test route that looks fine
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [{
+ "status": True,
+ }],
+ },
+ "spec": {
+ "host": "hostname",
+ },
+ },
+ "https://hostname/",
+ None,
+ ),
+])
+def test_get_kibana_url(route, expect_url, expect_error):
+ check = canned_kibana(lambda cmd, args, task_vars: json.dumps(route) if route else "")
+
+ url, error = check._get_kibana_url({})
+ if expect_url:
+ assert url == expect_url
+ else:
+ assert not url
+ if expect_error:
+ assert error == expect_error
+ else:
+ assert not error
+
+
+@pytest.mark.parametrize('exec_result, expect', [
+ (
+ 'urlopen error [Errno 111] Connection refused',
+ 'at least one router routing to it?',
+ ),
+ (
+ 'urlopen error [Errno -2] Name or service not known',
+ 'DNS configured for the Kibana hostname?',
+ ),
+ (
+ 'Status code was not [302]: HTTP Error 500: Server error',
+ 'did not return the correct status code',
+ ),
+ (
+ 'bork bork bork',
+ 'bork bork bork', # should pass through
+ ),
+])
+def test_verify_url_internal_failure(exec_result, expect):
+ check = Kibana(execute_module=lambda module_name, args, task_vars: dict(failed=True, msg=exec_result))
+ check._get_kibana_url = lambda task_vars: ('url', None)
+
+ error = check._check_kibana_route({})
+ assert_error(error, expect)
+
+
+@pytest.mark.parametrize('lib_result, expect', [
+ (
+ HTTPError('url', 500, "it broke", hdrs=None, fp=None),
+ 'it broke',
+ ),
+ (
+ URLError('it broke'),
+ 'it broke',
+ ),
+ (
+ 302,
+ 'returned the wrong error code',
+ ),
+ (
+ 200,
+ None,
+ ),
+])
+def test_verify_url_external_failure(lib_result, expect, monkeypatch):
+
+ class _http_return:
+
+ def __init__(self, code):
+ self.code = code
+
+ def getcode(self):
+ return self.code
+
+ def urlopen(url, context):
+ if type(lib_result) is int:
+ return _http_return(lib_result)
+ raise lib_result
+ monkeypatch.setattr(urllib2, 'urlopen', urlopen)
+
+ check = canned_kibana()
+ check._get_kibana_url = lambda task_vars: ('url', None)
+ check._verify_url_internal = lambda url, task_vars: None
+
+ error = check._check_kibana_route({})
+ assert_error(error, expect)
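
`test_verify_url_external_failure` patches `urllib2.urlopen` itself, so whatever the check does externally has to funnel through that call. A sketch of the probe the cases imply: exceptions surface as error text, any status other than 200 is reported as a wrong code, and 200 passes (function name and messages are illustrative, not the checker's exact API):

    try:
        import urllib2
        from urllib2 import HTTPError, URLError
    except ImportError:
        from urllib.error import HTTPError, URLError
        import urllib.request as urllib2

    def verify_url_external_sketch(url):
        """Probe the Kibana route from outside; return an error string or None."""
        try:
            result = urllib2.urlopen(url, context=None)
        except (HTTPError, URLError) as exc:
            # str() of either exception class includes the original message,
            # which is what the 'it broke' cases above assert on.
            return 'Error validating the Kibana route: {}'.format(exc)
        if result.getcode() != 200:
            return 'The Kibana route returned the wrong error code: {}'.format(result.getcode())
        return None
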
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
new file mode 100644
index 000000000..b6db34fe3
--- /dev/null
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -0,0 +1,137 @@
+import pytest
+import json
+
+from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
+
+task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
+
+
+logging_namespace = "logging"
+
+
+def canned_loggingcheck(exec_oc=None):
+ """Create a LoggingCheck object with canned exec_oc method"""
+ check = LoggingCheck("dummy") # fails if a module is actually invoked
+ check.logging_namespace = 'logging'
+ if exec_oc:
+ check.exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es"},
+ "name": "logging-es",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es",
+}
+
+plain_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+fluentd_pod_node1 = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {"host": "node1", "nodeName": "node1"},
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+plain_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+
+@pytest.mark.parametrize('problem, expect', [
+ ("[Errno 2] No such file or directory", "supposed to be a master"),
+ ("Permission denied", "Unexpected error using `oc`"),
+])
+def test_oc_failure(problem, expect):
+ def execute_module(module_name, args, task_vars):
+ if module_name == "ocutil":
+ return dict(failed=True, result=problem)
+ return dict(changed=False)
+
+ check = LoggingCheck({})
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.exec_oc(execute_module, logging_namespace, 'get foo', [], task_vars=task_vars_config_base)
+ assert expect in str(excinfo)
+
+
+groups_with_first_master = dict(masters=['this-host', 'other-host'])
+groups_with_second_master = dict(masters=['other-host', 'this-host'])
+groups_not_a_master = dict(masters=['other-host'])
+
+
+@pytest.mark.parametrize('groups, logging_deployed, is_active', [
+ (groups_with_first_master, True, True),
+ (groups_with_first_master, False, False),
+ (groups_not_a_master, True, False),
+ (groups_with_second_master, True, False),
+])
+def test_is_active(groups, logging_deployed, is_active):
+ task_vars = dict(
+ ansible_ssh_host='this-host',
+ groups=groups,
+ openshift_hosted_logging_deploy=logging_deployed,
+ )
+
+ assert LoggingCheck.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('pod_output, expect_pods, expect_error', [
+ (
+ 'No resources found.',
+ None,
+ 'There are no pods in the logging namespace',
+ ),
+ (
+ json.dumps({'items': [plain_kibana_pod, plain_es_pod, plain_curator_pod, fluentd_pod_node1]}),
+ [plain_es_pod],
+ None,
+ ),
+])
+def test_get_pods_for_component(pod_output, expect_pods, expect_error):
+ check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: pod_output)
+ pods, error = check.get_pods_for_component(
+ lambda name, args, task_vars: {},
+ logging_namespace,
+ "es",
+ {}
+ )
+ assert_error(error, expect_error)
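
`test_get_pods_for_component` expects only the Elasticsearch pod back out of a mixed list, which implies selection on the `component` label (the real check presumably hands a label selector to `oc`, so the filtering happens server-side; this client-side sketch just shows the equivalent selection):

    def pods_for_component(pods, component):
        """Select pods whose 'component' label matches, e.g. component='es'."""
        return [pod for pod in pods
                if pod['metadata']['labels'].get('component') == component]

    # Using the fixtures above:
    assert pods_for_component(
        [plain_kibana_pod, plain_es_pod, plain_curator_pod, fluentd_pod_node1],
        'es') == [plain_es_pod]
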
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
new file mode 100644
index 000000000..6494e1c06
--- /dev/null
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -0,0 +1,89 @@
+import pytest
+
+from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
+
+
+def test_openshift_version_not_supported():
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {}
+
+ openshift_release = '111.7.0'
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type='origin',
+ )
+
+ check = OvsVersion(execute_module=execute_module)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+
+ assert "no recommended version of Open vSwitch" in str(excinfo.value)
+
+
+def test_invalid_openshift_release_format():
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {}
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_image_tag='v0',
+ openshift_deployment_type='origin',
+ )
+
+ check = OvsVersion(execute_module=execute_module)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+ assert "invalid version" in str(excinfo.value)
+
+
+@pytest.mark.parametrize('openshift_release,expected_ovs_version', [
+ ("3.5", "2.6"),
+ ("3.6", "2.6"),
+ ("3.4", "2.4"),
+ ("3.3", "2.4"),
+ ("1.0", "2.4"),
+])
+def test_ovs_package_version(openshift_release, expected_ovs_version):
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ )
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ assert module_name == 'rpm_version'
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if pkg["name"] == "openvswitch":
+ assert pkg["version"] == expected_ovs_version
+
+ return return_value
+
+ check = OvsVersion(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result is return_value
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['etcd'], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert OvsVersion.is_active(task_vars=task_vars) == is_active
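
The parametrized versions pin down the mapping the check must encode: 3.5 and 3.6 want Open vSwitch 2.6, everything older falls back to 2.4, and a release with no entry (the 111.7.0 case) raises. A sketch of one way to express that table (cut-offs taken from the cases above; the check's actual lookup may be structured differently):

    def recommended_ovs_version(openshift_release):
        major_minor = tuple(int(part) for part in openshift_release.split('.')[:2])
        if major_minor > (3, 6):
            # Unknown future release: mirrors the check's OpenShiftCheckException.
            raise ValueError('no recommended version of Open vSwitch for {}'
                             .format(openshift_release))
        if major_minor >= (3, 5):
            return '2.6'
        return '2.4'

    assert recommended_ovs_version('3.6') == '2.6'
    assert recommended_ovs_version('1.0') == '2.4'
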
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 196d9816a..91eace512 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,24 +1,132 @@
import pytest
-from openshift_checks.package_version import PackageVersion
+from openshift_checks.package_version import PackageVersion, OpenShiftCheckException
-def test_package_version():
+@pytest.mark.parametrize('openshift_release, extra_words', [
+ ('111.7.0', ["no recommended version of Open vSwitch"]),
+ ('0.0.0', ["no recommended version of Docker"]),
+])
+def test_openshift_version_not_supported(openshift_release, extra_words):
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {}
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type='origin',
+ )
+
+ check = PackageVersion(execute_module=execute_module)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+
+ for word in extra_words:
+ assert word in str(excinfo.value)
+
+
+def test_invalid_openshift_release_format():
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {}
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_image_tag='v0',
+ openshift_deployment_type='origin',
+ )
+
+ check = PackageVersion(execute_module=execute_module)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+ assert "invalid version" in str(excinfo.value)
+
+
+@pytest.mark.parametrize('openshift_release', [
+ "3.5",
+ "3.6",
+ "3.4",
+ "3.3",
+])
+def test_package_version(openshift_release):
task_vars = dict(
openshift=dict(common=dict(service_type='origin')),
- openshift_release='3.5',
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
openshift_deployment_type='origin',
)
return_value = object()
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
assert module_name == 'aos_version'
- assert 'requested_openshift_release' in module_args
- assert 'openshift_deployment_type' in module_args
- assert 'rpm_prefix' in module_args
- assert module_args['requested_openshift_release'] == task_vars['openshift_release']
- assert module_args['openshift_deployment_type'] == task_vars['openshift_deployment_type']
- assert module_args['rpm_prefix'] == task_vars['openshift']['common']['service_type']
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if "-master" in pkg["name"] or "-node" in pkg["name"]:
+ assert pkg["version"] == task_vars["openshift_release"]
+
+ return return_value
+
+ check = PackageVersion(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result is return_value
+
+
+@pytest.mark.parametrize('deployment_type,openshift_release,expected_ovs_version', [
+ ("openshift-enterprise", "3.5", "2.6"),
+ ("origin", "3.6", "2.6"),
+ ("openshift-enterprise", "3.4", "2.4"),
+ ("origin", "3.3", "2.4"),
+])
+def test_ovs_package_version(deployment_type, openshift_release, expected_ovs_version):
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type=deployment_type,
+ )
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ assert module_name == 'aos_version'
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if pkg["name"] == "openvswitch":
+ assert pkg["version"] == expected_ovs_version
+
+ return return_value
+
+ check = PackageVersion(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result is return_value
+
+
+@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
+ ("origin", "3.5", "1.12"),
+ ("openshift-enterprise", "3.4", "1.12"),
+ ("origin", "3.3", "1.10"),
+ ("openshift-enterprise", "3.2", "1.10"),
+ ("origin", "3.1", "1.8"),
+ ("openshift-enterprise", "3.1", "1.8"),
+])
+def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type=deployment_type,
+ )
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ assert module_name == 'aos_version'
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if pkg["name"] == "docker":
+ assert pkg["version"] == expected_docker_version
+
return return_value
check = PackageVersion(execute_module=execute_module)
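
The rewritten assertions no longer inspect individual top-level arguments (`requested_openshift_release`, `rpm_prefix`, ...) but walk a `package_list` that the check now hands to `aos_version`. The shape those loops assume looks roughly like this (the entries and any keys beyond `name`/`version` are illustrative):

    module_args = {
        'package_list': [
            # master/node packages pinned to the requested release ...
            {'name': 'origin-master', 'version': '3.5'},
            {'name': 'origin-node', 'version': '3.5'},
            # ... plus dependencies pinned per-release by the check itself.
            {'name': 'openvswitch', 'version': '2.6'},
            {'name': 'docker', 'version': '1.12'},
        ],
    }
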
diff --git a/roles/openshift_health_checker/test/rpm_version_test.py b/roles/openshift_health_checker/test/rpm_version_test.py
new file mode 100644
index 000000000..2f09ef965
--- /dev/null
+++ b/roles/openshift_health_checker/test/rpm_version_test.py
@@ -0,0 +1,82 @@
+import pytest
+import rpm_version
+
+expected_pkgs = {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ },
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ },
+}
+
+
+@pytest.mark.parametrize('pkgs, expect_not_found', [
+ (
+ {},
+ ["spam", "eggs"], # none found
+ ),
+ (
+ {"spam": ["3.2.1", "4.5.1"]},
+ ["eggs"], # completely missing
+ ),
+ (
+ {
+ "spam": ["3.2.1", "4.5.1"],
+ "eggs": ["3.2.1"],
+ },
+ [], # all found
+ ),
+])
+def test_check_pkg_found(pkgs, expect_not_found):
+ if expect_not_found:
+ with pytest.raises(rpm_version.RpmVersionException) as e:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
+
+ assert "not found to be installed" in str(e.value)
+ assert set(expect_not_found) == set(e.value.problem_pkgs)
+ else:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
+
+
+@pytest.mark.parametrize('pkgs, expect_not_found', [
+ (
+ {
+ 'spam': ['3.2.1'],
+ 'eggs': ['3.3.2'],
+ },
+ {
+ "eggs": {
+ "required_version": "3.2",
+ "found_versions": ["3.3"],
+ }
+ }, # not the right version
+ ),
+ (
+ {
+ 'spam': ['3.1.2', "3.3.2"],
+ 'eggs': ['3.3.2', "1.2.3"],
+ },
+ {
+ "eggs": {
+ "required_version": "3.2",
+ "found_versions": ["3.3", "1.2"],
+ },
+ "spam": {
+ "required_version": "3.2",
+ "found_versions": ["3.1", "3.3"],
+ }
+ }, # not the right version
+ ),
+])
+def test_check_pkg_version_found(pkgs, expect_not_found):
+ if expect_not_found:
+ with pytest.raises(rpm_version.RpmVersionException) as e:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
+
+ assert "found to be installed with an incorrect version" in str(e.value)
+ assert expect_not_found == e.value.problem_pkgs
+ else:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
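
The second set of fixtures shows how `_check_pkg_versions` compares versions: `3.2.1` satisfies an expected `3.2.1` while `3.3.2` does not, and the reported `required_version`/`found_versions` are both truncated to major.minor. That implies a prefix comparison along these lines (a sketch under that assumption, not the module's exact code):

    def version_matches(found, required):
        """Compare only the major.minor prefix: '3.2.1' satisfies '3.2.x'."""
        truncate = lambda version: '.'.join(version.split('.')[:2])
        return truncate(found) == truncate(required)

    assert version_matches('3.2.1', '3.2.1')
    assert not version_matches('3.3.2', '3.2.1')
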
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 3dde83bee..8aaba0f3c 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -1,20 +1,4 @@
---
-- name: Assert supported openshift.hosted.registry.storage.provider
- assert:
- that:
- - openshift.hosted.registry.storage.provider in ['azure_blob', 's3', 'swift']
- msg: >
- Object Storage Provider: "{{ openshift.hosted.registry.storage.provider }}"
- is not currently supported
-
-- name: Assert implemented openshift.hosted.registry.storage.provider
- assert:
- that:
- - openshift.hosted.registry.storage.provider not in ['azure_blob', 'swift']
- msg: >
- Support for provider: "{{ openshift.hosted.registry.storage.provider }}"
- not implemented yet
-
- include: s3.yml
when: openshift.hosted.registry.storage.provider == 's3'
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 95a86354e..66d880d23 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -89,7 +89,7 @@ openshift_logging_es_cpu_limit: null
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: null
+openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default(null) }}"
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
@@ -128,7 +128,7 @@ openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
-openshift_logging_es_ops_pv_selector: None
+openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default(null) }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 0c7152b16..6d023a02d 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -32,9 +32,8 @@
# delete our old secrets
- name: delete logging secrets
- oc_obj:
+ oc_secret:
state: absent
- kind: secret
namespace: "{{ openshift_logging_namespace }}"
name: "{{ item }}"
with_items:
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 040356e3d..9c8f0986a 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -17,7 +17,7 @@
- name: Generate certificates
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
--serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
check_mode: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index dde76b142..7c1062b77 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -10,7 +10,7 @@
name: "{{ openshift_logging_namespace }}"
node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
-- name: Labelling logging project
+- name: Labeling logging project
oc_label:
state: present
kind: namespace
@@ -23,7 +23,7 @@
- openshift_logging_labels is defined
- openshift_logging_labels is dict
-- name: Labelling logging project
+- name: Labeling logging project
oc_label:
state: present
kind: namespace
@@ -78,6 +78,8 @@
- "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}"
- "{{ openshift_logging_facts.elasticsearch.pvcs }}"
- "{{ es_indices }}"
+ when:
+ - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
# Create any new DC that may be required
- include_role:
@@ -124,6 +126,7 @@
- "{{ es_ops_indices }}"
when:
- openshift_logging_use_ops | bool
+ - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
# Create any new DC that may be required
- include_role:
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index 7ab140357..00de0ca06 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -27,7 +27,7 @@
- name: Creating signed server cert and key for {{ cert_info.procure_component }}
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
--signer-serial={{generated_certs_dir}}/ca.serial.txt
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 7e88a7498..f1d15b76d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -217,7 +217,7 @@
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
annotations:
- volume.alpha.kubernetes.io/storage-class: "dynamic"
+ volume.beta.kubernetes.io/storage-class: "dynamic"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- openshift_logging_elasticsearch_pvc_dynamic
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 681f5a7e6..58c325c8a 100644
--- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -38,6 +38,7 @@ gateway:
io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
openshift.config:
use_common_data_model: true
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index e129205ca..bd2289f0d 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -84,6 +84,9 @@ spec:
name: "RECOVER_AFTER_TIME"
value: "{{openshift_logging_elasticsearch_recover_after_time}}"
-
+ name: "READINESS_PROBE_TIMEOUT"
+ value: "30"
+ -
name: "IS_MASTER"
value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
@@ -104,8 +107,8 @@ spec:
exec:
command:
- "/usr/share/elasticsearch/probe/readiness.sh"
- initialDelaySeconds: 5
- timeoutSeconds: 4
+ initialDelaySeconds: 10
+ timeoutSeconds: 30
periodSeconds: 5
volumes:
- name: elasticsearch
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 55b28ee24..bae55ffaa 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -43,6 +43,31 @@
kibana_name: "{{ 'logging-kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
kibana_component: "{{ 'kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+# Check {{ generated_certs_dir }} for session_secret and oauth_secret
+- name: Checking for session_secret
+ stat: path="{{generated_certs_dir}}/session_secret"
+ register: session_secret_file
+
+- name: Checking for oauth_secret
+ stat: path="{{generated_certs_dir}}/oauth_secret"
+ register: oauth_secret_file
+
+# gen session_secret if necessary
+- name: Generate session secret
+ copy:
+ content: "{{ 200 | oo_random_word }}"
+ dest: "{{ generated_certs_dir }}/session_secret"
+ when:
+ - not session_secret_file.stat.exists
+
+# gen oauth_secret if necessary
+- name: Generate oauth secret
+ copy:
+ content: "{{ 64 | oo_random_word }}"
+ dest: "{{ generated_certs_dir }}/oauth_secret"
+ when:
+ - not oauth_secret_file.stat.exists
+
- name: Retrieving the cert to use when generating secrets for the logging components
slurp:
src: "{{ generated_certs_dir }}/{{ item.file }}"
@@ -52,6 +77,8 @@
- { name: "kibana_internal_key", file: "kibana-internal.key"}
- { name: "kibana_internal_cert", file: "kibana-internal.crt"}
- { name: "server_tls", file: "server-tls.json"}
+ - { name: "session_secret", file: "session_secret" }
+ - { name: "oauth_secret", file: "oauth_secret" }
# services
- name: Set {{ kibana_name }} service
@@ -120,19 +147,16 @@
files:
- "{{ tempdir }}/templates/kibana-route.yaml"
-# gen session_secret -- if necessary
-# TODO: make idempotent
-- name: Generate proxy session
- set_fact:
- session_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(200) }}"
- check_mode: no
+# preserve list of current hostnames
+- name: Get current oauthclient hostnames
+ oc_obj:
+ state: list
+ name: kibana-proxy
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: oauthclient
+ register: oauth_client_list
-# gen oauth_secret -- if necessary
-# TODO: make idempotent
-- name: Generate oauth client secret
- set_fact:
- oauth_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(64) }}"
- check_mode: no
+- set_fact: proxy_hostnames={{ oauth_client_list.results.results[0].redirectURIs | default([]) + ['https://' ~ openshift_logging_kibana_hostname] }}
# create oauth client
- name: Create oauth-client template
@@ -140,8 +164,8 @@
src: oauth-client.j2
dest: "{{ tempdir }}/templates/oauth-client.yml"
vars:
- kibana_hostname: "{{ openshift_logging_kibana_hostname }}"
- secret: "{{ oauth_secret }}"
+ kibana_hostnames: "{{ proxy_hostnames | unique }}"
+ secret: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}"
- name: Set kibana-proxy oauth-client
oc_obj:
@@ -179,18 +203,18 @@
# path: "{{ generated_certs_dir }}/kibana-internal.key"
#- name: server-cert
# path: "{{ generated_certs_dir }}/kibana-internal.crt"
- #- name: server-tls
+ #- name: server-tls.json
# path: "{{ generated_certs_dir }}/server-tls.json"
contents:
- path: oauth-secret
- data: "{{ oauth_secret }}"
+ data: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}"
- path: session-secret
- data: "{{ session_secret }}"
+ data: "{{ key_pairs | entry_from_named_pair('session_secret') | b64decode }}"
- path: server-key
data: "{{ key_pairs | entry_from_named_pair('kibana_internal_key') | b64decode }}"
- path: server-cert
data: "{{ key_pairs | entry_from_named_pair('kibana_internal_cert') | b64decode }}"
- - path: server-tls
+ - path: server-tls.json
data: "{{ key_pairs | entry_from_named_pair('server_tls') | b64decode }}"
# create Kibana DC
diff --git a/roles/openshift_logging_kibana/templates/oauth-client.j2 b/roles/openshift_logging_kibana/templates/oauth-client.j2
index 6767f6d89..c80ff3d30 100644
--- a/roles/openshift_logging_kibana/templates/oauth-client.j2
+++ b/roles/openshift_logging_kibana/templates/oauth-client.j2
@@ -4,9 +4,11 @@ metadata:
name: kibana-proxy
labels:
logging-infra: support
-secret: {{secret}}
+secret: {{ secret }}
redirectURIs:
-- https://{{kibana_hostname}}
+{% for host in kibana_hostnames %}
+- {{ host }}
+{% endfor %}
scopeRestrictions:
- literals:
- user:info
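
The kibana task changes above replace the two `set_fact`-based generators, which produced a fresh `session_secret`/`oauth_secret` on every run (hence the removed `TODO: make idempotent`), with stat-guarded files under `generated_certs_dir` that are slurped back alongside the certificates. The pattern, reduced to Python (the directory path is illustrative; the `oo_random_word` lengths follow the tasks above):

    import os
    import random
    import string

    generated_certs_dir = '/etc/origin/logging'  # illustrative layout

    def ensure_secret(path, length):
        """Generate a random secret only if the file is missing, then reuse it."""
        if not os.path.exists(path):
            alphabet = string.ascii_letters + string.digits
            with open(path, 'w') as out:
                out.write(''.join(random.choice(alphabet) for _ in range(length)))
        with open(path) as existing:
            return existing.read()

    session_secret = ensure_secret(os.path.join(generated_certs_dir, 'session_secret'), 200)
    oauth_secret = ensure_secret(os.path.join(generated_certs_dir, 'oauth_secret'), 64)

Because the secrets now survive re-runs, the kibana-proxy secret and the oauthclient stay in sync across redeploys instead of being regenerated each time.
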
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index a00f7c939..243698c6a 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -117,7 +117,7 @@ spec:
name: logging-mux
- name: certs
secret:
- secretName: logging-mux
+ secretName: logging-fluentd
- name: dockerhostname
hostPath:
path: /etc/hostname
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 5522fef26..aed5598c0 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -177,6 +177,7 @@
# https://github.com/openshift/origin/issues/6447
- name: Start and enable master
systemd:
+ daemon_reload: yes
name: "{{ openshift.common.service_type }}-master"
enabled: yes
state: started
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 9706da24b..62413536b 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -71,7 +71,7 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
-- name: Generate the master client config
+- name: Generate the loopback master client config
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
@@ -80,8 +80,8 @@
--certificate-authority={{ openshift_ca_cert }}
--client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
--groups=system:masters,system:openshift-master
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
+ --master={{ hostvars[item].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[item].openshift.master.loopback_api_url }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index 84503217b..1f10de4a2 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -68,6 +68,9 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_metrics_resolution`: How often metrics should be gathered.
+- `openshift_metrics_install_hawkular_agent`: Install the Hawkular OpenShift Agent (HOSA). HOSA can be used
+ to collect custom metrics from your pods. This component is currently in tech-preview and is not installed by default.
+
## Additional variables to control resource limits
Each metrics component (hawkular, cassandra, heapster) can specify cpu and memory limits and requests by setting
the corresponding role variable:
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 1d3db8a1a..ba50566e9 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -16,6 +16,7 @@ openshift_metrics_hawkular_nodeselector: ""
openshift_metrics_cassandra_replicas: 1
openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
+openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default(null) }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
@@ -30,6 +31,14 @@ openshift_metrics_heapster_requests_memory: 0.9375G
openshift_metrics_heapster_requests_cpu: null
openshift_metrics_heapster_nodeselector: ""
+openshift_metrics_install_hawkular_agent: False
+openshift_metrics_hawkular_agent_limits_memory: null
+openshift_metrics_hawkular_agent_limits_cpu: null
+openshift_metrics_hawkular_agent_requests_memory: null
+openshift_metrics_hawkular_agent_requests_cpu: null
+openshift_metrics_hawkular_agent_nodeselector: ""
+openshift_metrics_hawkular_agent_namespace: "default"
+
openshift_metrics_hawkular_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}"
openshift_metrics_duration: 7
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index 7af3f9467..3dc15d58b 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -1,7 +1,7 @@
---
- name: generate ca certificate chain
command: >
- {{ openshift.common.admin_binary }} ca create-signer-cert
+ {{ openshift.common.client_binary }} adm ca create-signer-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/ca.key'
--cert='{{ mktemp.stdout }}/ca.crt'
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index fb4fe2f03..7b81b3c10 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -73,6 +73,8 @@
{{ hawkular_secrets['hawkular-metrics.key'] }}
tls.truststore.crt: >
{{ hawkular_secrets['hawkular-cassandra.crt'] }}
+ ca.crt: >
+ {{ hawkular_secrets['ca.crt'] }}
when: name not in metrics_secrets.stdout_lines
changed_when: no
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 3b4e8560f..62b7f52cb 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -23,7 +23,7 @@
changed_when: false
- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
- when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''
+ when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
- name: generate hawkular-cassandra persistent volume claims
template:
@@ -35,6 +35,7 @@
metrics-infra: hawkular-cassandra
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when:
- openshift_metrics_cassandra_storage_type != 'emptydir'
@@ -50,9 +51,10 @@
labels:
metrics-infra: hawkular-cassandra
annotations:
- volume.alpha.kubernetes.io/storage-class: dynamic
+ volume.beta.kubernetes.io/storage-class: dynamic
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
changed_when: false
diff --git a/roles/openshift_metrics/tasks/install_hosa.yaml b/roles/openshift_metrics/tasks/install_hosa.yaml
new file mode 100644
index 000000000..cc533a68b
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install_hosa.yaml
@@ -0,0 +1,44 @@
+---
+- name: Generate Hawkular Agent (HOSA) Cluster Role
+ template:
+ src: hawkular_openshift_agent_role.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-role.yaml"
+ changed_when: no
+
+- name: Generate Hawkular Agent (HOSA) Service Account
+ template:
+ src: hawkular_openshift_agent_sa.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-sa.yaml"
+ changed_when: no
+
+- name: Generate Hawkular Agent (HOSA) Daemon Set
+ template:
+ src: hawkular_openshift_agent_ds.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-ds.yaml"
+ vars:
+ node_selector: "{{openshift_metrics_hawkular_agent_nodeselector | default('') }}"
+ changed_when: no
+
+- name: Generate the Hawkular Agent (HOSA) Configmap
+ template:
+ src: hawkular_openshift_agent_cm.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-cm.yaml"
+ changed_when: no
+
+- name: Generate role binding for the hawkular-openshift-agent service account
+ template:
+ src: rolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/metrics-hawkular-agent-rolebinding.yaml"
+ vars:
+ cluster: True
+ obj_name: hawkular-openshift-agent-rb
+ labels:
+ metrics-infra: hawkular-agent
+ roleRef:
+ kind: ClusterRole
+ name: hawkular-openshift-agent
+ subjects:
+ - kind: ServiceAccount
+ name: hawkular-openshift-agent
+ namespace: "{{openshift_metrics_hawkular_agent_namespace}}"
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 74eb56713..fdf4ae57f 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -16,11 +16,19 @@
include: install_heapster.yaml
when: openshift_metrics_heapster_standalone | bool
-- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+- name: Install Hawkular OpenShift Agent (HOSA)
+ include: install_hosa.yaml
+ when: openshift_metrics_install_hawkular_agent | default(false) | bool
+
+- find:
+ paths: "{{ mktemp.stdout }}/templates"
+ patterns: "^(?!metrics-hawkular-openshift-agent).*.yaml"
+ use_regex: true
register: object_def_files
changed_when: no
-- slurp: src={{item.path}}
+- slurp:
+ src: "{{item.path}}"
register: object_defs
with_items: "{{object_def_files.files}}"
changed_when: no
@@ -34,6 +42,31 @@
file_content: "{{ item.content | b64decode | from_yaml }}"
with_items: "{{ object_defs.results }}"
+- find:
+ paths: "{{ mktemp.stdout }}/templates"
+ patterns: "^metrics-hawkular-openshift-agent.*.yaml"
+ use_regex: true
+ register: hawkular_agent_object_def_files
+ when: openshift_metrics_install_hawkular_agent | bool
+ changed_when: no
+
+- slurp:
+ src: "{{item.path}}"
+ register: hawkular_agent_object_defs
+ with_items: "{{ hawkular_agent_object_def_files.files }}"
+ when: openshift_metrics_install_hawkular_agent | bool
+ changed_when: no
+
+- name: Create Hawkular Agent objects
+ include: oc_apply.yaml
+ vars:
+ kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ namespace: "{{ openshift_metrics_hawkular_agent_namespace }}"
+ file_name: "{{ item.source }}"
+ file_content: "{{ item.content | b64decode | from_yaml }}"
+ with_items: "{{ hawkular_agent_object_defs.results }}"
+ when: openshift_metrics_install_hawkular_agent | bool
+
- include: update_master_config.yaml
- command: >
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 5d8506a73..0b5f23c24 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -44,6 +44,9 @@
- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}"
+- include: uninstall_hosa.yaml
+ when: not openshift_metrics_install_hawkular_agent | bool
+
- name: Delete temp directory
local_action: file path=local_tmp.stdout state=absent
tags: metrics_cleanup
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index dd67703b4..1e1af40e8 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -14,7 +14,7 @@
command: >
{{ openshift.common.client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
- -n {{ openshift_metrics_project }}
+ -n {{namespace}}
register: generation_apply
failed_when: "'error' in generation_apply.stderr"
changed_when: no
diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml
index 199968579..2d880f4d6 100644
--- a/roles/openshift_metrics/tasks/setup_certificate.yaml
+++ b/roles/openshift_metrics/tasks/setup_certificate.yaml
@@ -1,7 +1,7 @@
---
- name: generate {{ component }} keys
command: >
- {{ openshift.common.admin_binary }} ca create-server-cert
+ {{ openshift.common.client_binary }} adm ca create-server-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/{{ component }}.key'
--cert='{{ mktemp.stdout }}/{{ component }}.crt'
diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
new file mode 100644
index 000000000..42ed02460
--- /dev/null
+++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
@@ -0,0 +1,15 @@
+---
+- name: remove Hawkular Agent (HOSA) components
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found --selector=metrics-infra=agent
+ all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
+ register: delete_metrics
+ changed_when: delete_metrics.stdout != 'No resources found'
+
+- name: remove rolebindings
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found
+ clusterrolebinding/hawkular-openshift-agent-rb
+ register: delete_rolebindings
+ changed_when: delete_rolebindings.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2
new file mode 100644
index 000000000..bf472c066
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2
@@ -0,0 +1,54 @@
+id: hawkular-openshift-agent
+kind: ConfigMap
+apiVersion: v1
+name: Hawkular OpenShift Agent Configuration
+metadata:
+ name: hawkular-openshift-agent-configuration
+ labels:
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
+data:
+ config.yaml: |
+ kubernetes:
+ tenant: ${POD:namespace_name}
+ hawkular_server:
+ url: https://hawkular-metrics.openshift-infra.svc.cluster.local
+ credentials:
+ username: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.username
+ password: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.password
+ ca_cert_file: secret:openshift-infra/hawkular-metrics-certs/ca.crt
+ emitter:
+ status_enabled: false
+ collector:
+ minimum_collection_interval: 10s
+ default_collection_interval: 30s
+ metric_id_prefix: pod/${POD:uid}/custom/
+ tags:
+ metric_name: ${METRIC:name}
+ description: ${METRIC:description}
+ units: ${METRIC:units}
+ namespace_id: ${POD:namespace_uid}
+ namespace_name: ${POD:namespace_name}
+ node_name: ${POD:node_name}
+ pod_id: ${POD:uid}
+ pod_ip: ${POD:ip}
+ pod_name: ${POD:name}
+ pod_namespace: ${POD:namespace_name}
+ hostname: ${POD:hostname}
+ host_ip: ${POD:host_ip}
+ labels: ${POD:labels}
+ type: pod
+ collector: hawkular_openshift_agent
+ custom_metric: true
+ hawkular-openshift-agent: |
+ endpoints:
+ - type: prometheus
+ protocol: "http"
+ port: 8080
+ path: /metrics
+ collection_interval: 30s
+ metrics:
+ - name: hawkular_openshift_agent_metric_data_points_collected_total
+ - name: hawkular_openshift_agent_monitored_endpoints
+ - name: hawkular_openshift_agent_monitored_pods
+ - name: hawkular_openshift_agent_monitored_metrics
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
new file mode 100644
index 000000000..d65eaf9ae
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
@@ -0,0 +1,91 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ name: hawkular-openshift-agent
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
+spec:
+ selector:
+ matchLabels:
+ name: hawkular-openshift-agent
+ template:
+ metadata:
+ labels:
+ name: hawkular-openshift-agent
+ metrics-infra: agent
+ spec:
+ serviceAccount: hawkular-openshift-agent
+{% if node_selector is iterable and node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - image: {{openshift_metrics_image_prefix}}metrics-hawkular-openshift-agent:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
+ name: hawkular-openshift-agent
+{% if ((openshift_metrics_hawkular_agent_limits_cpu is defined and openshift_metrics_hawkular_agent_limits_cpu is not none)
+ or (openshift_metrics_hawkular_agent_limits_memory is defined and openshift_metrics_hawkular_agent_limits_memory is not none)
+ or (openshift_metrics_hawkular_agent_requests_cpu is defined and openshift_metrics_hawkular_agent_requests_cpu is not none)
+ or (openshift_metrics_hawkular_agent_requests_memory is defined and openshift_metrics_hawkular_agent_requests_memory is not none))
+%}
+ resources:
+{% if (openshift_metrics_hawkular_agent_limits_cpu is not none
+ or openshift_metrics_hawkular_agent_limits_memory is not none)
+%}
+ limits:
+{% if openshift_metrics_hawkular_agent_limits_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_agent_limits_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_agent_limits_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_agent_limits_memory}}"
+{% endif %}
+{% endif %}
+{% if (openshift_metrics_hawkular_agent_requests_cpu is not none
+ or openshift_metrics_hawkular_agent_requests_memory is not none)
+%}
+ requests:
+{% if openshift_metrics_hawkular_agent_requests_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_agent_requests_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_agent_requests_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_agent_requests_memory}}"
+{% endif %}
+{% endif %}
+{% endif %}
+
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /health
+ port: 8080
+ initialDelaySeconds: 30
+ periodSeconds: 30
+ command:
+ - "hawkular-openshift-agent"
+ - "-config"
+ - "/hawkular-openshift-agent-configuration/config.yaml"
+ - "-v"
+ - "3"
+ env:
+ - name: K8S_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: hawkular-openshift-agent-configuration
+ mountPath: "/hawkular-openshift-agent-configuration"
+ volumes:
+ - name: hawkular-openshift-agent-configuration
+ configMap:
+ name: hawkular-openshift-agent-configuration
+ - name: hawkular-openshift-agent
+ configMap:
+ name: hawkular-openshift-agent-configuration
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2
new file mode 100644
index 000000000..24b8cd801
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ metrics-infra: agent
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - namespaces
+ - nodes
+ - pods
+ - projects
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2
new file mode 100644
index 000000000..ec604d73c
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index c2e56ba21..0b801b33f 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -18,6 +18,13 @@ metadata:
{% endfor %}
{% endif %}
spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
accessModes:
{% for mode in access_modes %}
- {{ mode }}
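
The new `pv_selector` block only renders when the variable is a real mapping, producing a `spec.selector.matchLabels` stanza that ties the claim to correspondingly labeled PVs (see the matching `labels:` addition to persistent-volume.yml.j2 below). A standalone rendering sketch, with `items()` standing in for the template's Python-2-era `iteritems()` and an assumed label set:

    from jinja2 import Template

    pvc_spec = Template("""\
    spec:
    {% if pv_selector is defined and pv_selector is mapping %}
      selector:
        matchLabels:
    {% for key, value in pv_selector.items() %}
          {{ key }}: {{ value }}
    {% endfor %}
    {% endif %}\
    """)

    # 'storage: metrics' is a hypothetical label, e.g. set via
    # openshift_hosted_metrics_storage_labels in the inventory.
    print(pvc_spec.render(pv_selector={'storage': 'metrics'}))
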
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 4dcf1eef8..a6bd12d4e 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,6 +1,8 @@
---
- name: restart openvswitch
- systemd: name=openvswitch state=restarted
+ systemd:
+ name: openvswitch
+ state: restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
notify:
- restart openvswitch pause
@@ -10,8 +12,13 @@
when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
- name: restart node
- systemd: name={{ openshift.common.service_type }}-node state=restarted
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
- name: reload sysctl.conf
command: /sbin/sysctl -p
+
+- name: reload systemd units
+ command: systemctl daemon-reload
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index a8beaa060..573051504 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -206,6 +206,7 @@
- name: Start and enable node dep
systemd:
+ daemon_reload: yes
name: "{{ openshift.common.service_type }}-node-dep"
enabled: yes
state: started
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index f58c803c4..e3ce5df3d 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -8,6 +8,9 @@
src: openshift.docker.node.dep.service
register: install_node_dep_result
when: openshift.common.is_containerized | bool
+ notify:
+ - reload systemd units
+ - restart node
- block:
- name: Pre-pull node image
@@ -21,6 +24,9 @@
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
src: openshift.docker.node.service
register: install_node_result
+ notify:
+ - reload systemd units
+ - restart node
when:
- openshift.common.is_containerized | bool
- not openshift.common.is_node_system_container | bool
@@ -31,6 +37,9 @@
src: "{{ openshift.common.service_type }}-node.service.j2"
register: install_node_result
when: not openshift.common.is_containerized | bool
+ notify:
+ - reload systemd units
+ - restart node
- name: Create the openvswitch service env file
template:
@@ -39,6 +48,7 @@
when: openshift.common.is_containerized | bool
register: install_ovs_sysconfig
notify:
+ - reload systemd units
- restart openvswitch
- name: Install Node system container
@@ -67,6 +77,7 @@
when: openshift.common.use_openshift_sdn | default(true) | bool
register: install_oom_fix_result
notify:
+ - reload systemd units
- restart openvswitch
- block:
@@ -81,6 +92,7 @@
dest: "/etc/systemd/system/openvswitch.service"
src: openvswitch.docker.service
notify:
+ - reload systemd units
- restart openvswitch
when:
- openshift.common.is_containerized | bool
@@ -119,8 +131,3 @@
when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
notify:
- restart node
-
-- name: Reload systemd units
- command: systemctl daemon-reload
- notify:
- - restart node
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index f397cbbf1..8bae9aaac 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -1,3 +1,5 @@
no-resolv
domain-needed
server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }}
+no-negcache
+max-cache-ttl=1
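
Here `no-negcache` stops dnsmasq from caching NXDOMAIN answers, and `max-cache-ttl=1` caps every positive cache entry at one second, so lookups for the cluster domain track the upstream kube service address with at most a second of staleness. A spot check of the node-local resolver, written as a task (a sketch; the record name assumes the default cluster.local domain):

    - name: Spot-check node-local dnsmasq resolution
      command: dig +short kubernetes.default.svc.cluster.local @127.0.0.1
      register: dns_check
      changed_when: false
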
diff --git a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
index 480e87d58..06a2d16ba 100644
--- a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
@@ -12,3 +12,18 @@
- name: Ensure python-yaml present for config upgrade
package: name=PyYAML state=present
when: not openshift.common.is_atomic | bool
+
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: "{{ openshift.common.service_type }}-node.service.j2"
+ register: l_node_unit
+
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: l_node_unit | changed
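
The NOTE above is the reason for shelling out; on Ansible 2.4 and later the systemd module accepts a bare daemon-reload with no service name, so an equivalent sketch under that assumption would be:

    - name: Reload systemd units
      systemd:
        daemon_reload: yes
      when: l_node_unit | changed
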
diff --git a/roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2 b/roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2
new file mode 120000
index 000000000..6041fb13a
--- /dev/null
+++ b/roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2
@@ -0,0 +1 @@
+../../openshift_node/templates/atomic-openshift-node.service.j2 \ No newline at end of file
diff --git a/roles/openshift_node_upgrade/templates/origin-node.service.j2 b/roles/openshift_node_upgrade/templates/origin-node.service.j2
new file mode 120000
index 000000000..79c45a303
--- /dev/null
+++ b/roles/openshift_node_upgrade/templates/origin-node.service.j2
@@ -0,0 +1 @@
+../../openshift_node/templates/origin-node.service.j2 \ No newline at end of file
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index 877e88002..9c5103597 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -7,6 +7,12 @@ items:
kind: PersistentVolume
metadata:
name: "{{ volume.name }}"
+{% if volume.labels is defined and volume.labels is mapping %}
+ labels:
+{% for key,value in volume.labels.items() %}
+ {{ key }}: {{ value }}
+{% endfor %}
+{% endif %}
spec:
capacity:
storage: "{{ volume.capacity }}"
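
These labels are the counterpart of the PVC selector added in pvc.j2 above: a claim binds to a volume only if its `matchLabels` are a subset of the volume's labels. Rendered output for a hypothetical `volume.labels` mapping:

    # given volume.labels = { storage-tier: metrics }
    metadata:
      name: "metrics-pv"
      labels:
        storage-tier: metrics
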
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 023b1a9b7..8f8550e2d 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -4,7 +4,8 @@
path: /run/ostree-booted
register: ostree_booted
-- block:
+- when: not ostree_booted.stat.exists
+ block:
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
@@ -24,41 +25,40 @@
- openshift_additional_repos | length == 0
notify: refresh cache
- # Note: OpenShift repositories under CentOS may be shipped through the
- # "centos-release-openshift-origin" package which configures the repository.
- # This task matches the file names provided by the package so that they are
- # not installed twice in different files and remains idempotent.
- - name: Configure origin gpg keys if needed
- copy:
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- with_items:
- - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
- dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
- - src: origin/repos/openshift-ansible-centos-paas-sig.repo
- dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
- notify: refresh cache
- when:
- - ansible_os_family == "RedHat"
- - ansible_distribution != "Fedora"
- - openshift_deployment_type == 'origin'
- - openshift_enable_origin_repo | default(true) | bool
-
# Singleton block
- - when: r_osr_first_run | default(true)
+ - when: r_openshift_repos_has_run is not defined
block:
+
+ # Note: OpenShift repositories under CentOS may be shipped through the
+ # "centos-release-openshift-origin" package which configures the repository.
+ # This task matches the file names provided by the package so that they are
+ # not installed twice in different files and the task remains idempotent.
+ - name: Configure origin gpg keys if needed
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
+ dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+ - src: origin/repos/openshift-ansible-centos-paas-sig.repo
+ dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
+ notify: refresh cache
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution != "Fedora"
+ - openshift_deployment_type == 'origin'
+ - openshift_enable_origin_repo | default(true) | bool
+
- name: Ensure clean repo cache in the event repos have been changed manually
debug:
msg: "First run of openshift_repos"
changed_when: true
notify: refresh cache
- - name: Set fact r_osr_first_run false
+ - name: Record that openshift_repos already ran
set_fact:
- r_osr_first_run: false
+ r_openshift_repos_has_run: True
# Force running ALL handlers now, because we expect repo cache to be cleared
# if changes have been made.
- meta: flush_handlers
-
- when: not ostree_booted.stat.exists
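
Two idioms drive this refactor: a `when:` attached to a `block:` guards every task inside it, and `meta: flush_handlers` runs any notified handlers (here, the cache refresh) immediately rather than at the end of the play. Reduced to its essentials (task body illustrative only):

    - when: not ostree_booted.stat.exists
      block:
      - name: Configure repositories
        debug:
          msg: "repo configuration tasks run here"
        changed_when: true
        notify: refresh cache
      - meta: flush_handlers
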
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 7b310dbf8..62fc35299 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -1,7 +1,31 @@
OpenShift GlusterFS Cluster
===========================
-OpenShift GlusterFS Cluster Installation
+OpenShift GlusterFS Cluster Configuration
+
+This role handles the configuration of GlusterFS clusters. It can handle
+three primary configuration scenarios:
+
+* Configuring a new, natively-hosted GlusterFS cluster. In this scenario,
+ GlusterFS pods are deployed on nodes in the OpenShift cluster which are
+ configured to provide storage.
+* Configuring a new, external GlusterFS cluster. In this scenario, the
+ cluster nodes have the GlusterFS software pre-installed but have not
+ been configured yet. The installer will take care of configuring the
+ cluster(s) for use by OpenShift applications.
+* Using existing GlusterFS clusters. In this scenario, one or more
+ GlusterFS clusters are assumed to be already set up. These clusters can
+ be either natively-hosted or external, but must be managed by a
+ [heketi service](https://github.com/heketi/heketi).
+
+As part of the configuration, a particular GlusterFS cluster may be
+specified to provide backend storage for a natively-hosted Docker
+registry.
+
+Unless configured otherwise, a StorageClass will be automatically
+created for each non-registry GlusterFS cluster. This will allow
+applications which can mount PersistentVolumes to request
+dynamically-provisioned GlusterFS volumes.
Requirements
------------
@@ -21,26 +45,50 @@ hosted Docker registry:
* `[glusterfs_registry]`
+Host Variables
+--------------
+
+For configuring new clusters, the following host variables are available.
+
+Each host in either of the above groups must have the following variable
+defined:
+
+| Name | Default value | Description |
+|-------------------|---------------|-----------------------------------------|
+| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]'
+
+In addition, each host may specify the following variables to further control
+its configuration as a GlusterFS node:
+
+| Name | Default value | Description |
+|--------------------|---------------------------|-----------------------------------------|
+| glusterfs_cluster | 1 | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
+| glusterfs_hostname | openshift.common.hostname | A hostname (or IP address) that will be used for internal GlusterFS communication
+| glusterfs_ip | openshift.common.ip | An IP address that will be used by pods to communicate with the GlusterFS node
+| glusterfs_zone | 1 | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volume's bricks as evenly as possible across all zones
+
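+A minimal inventory sketch using these host variables (hostnames and device
+names are hypothetical):
+
+```
+[glusterfs]
+node1.example.com glusterfs_devices='[ "/dev/sdb" ]' glusterfs_zone=1
+node2.example.com glusterfs_devices='[ "/dev/sdb" ]' glusterfs_zone=2
+node3.example.com glusterfs_devices='[ "/dev/sdb" ]' glusterfs_zone=3
+```
+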
Role Variables
--------------
This role has the following variables that control the integration of a
GlusterFS cluster into a new or existing OpenShift cluster:
-| Name | Default value | |
+| Name | Default value | Description |
|--------------------------------------------------|-------------------------|-----------------------------------------|
| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources
| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
-| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode
+| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
-| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin
-| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
@@ -52,17 +100,24 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | |
-|---------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
-| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters
+| Name | Default value | Description |
+|-------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended not to create a StorageClass for GlusterFS clusters serving registry storage, to avoid performance penalties
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | A separate admin key for the registry cluster's heketi service
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | A separate user key for the registry cluster's heketi service
Additionally, this role's behavior responds to the following registry-specific
-variable:
-
-| Name | Default value | Description |
-|----------------------------------------------|---------------|------------------------------------------------------------------------------|
-| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+variables:
+
+| Name | Default value | Description |
+|-----------------------------------------------|------------------------------|-----------------------------------------|
+| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
+| openshift_hosted_registry_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage
+| openshift_hosted_registry_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume
+| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
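+
+For example, to swap an existing registry volume for a GlusterFS volume while
+copying its contents over, an inventory would set (a sketch using only the
+variables above):
+
+```
+[OSEv3:vars]
+openshift_hosted_registry_glusterfs_swap=True
+openshift_hosted_registry_glusterfs_swapcopy=True
+```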
Dependencies
------------
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index ebe9ca30b..468877e57 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -2,7 +2,9 @@
openshift_storage_glusterfs_timeout: 300
openshift_storage_glusterfs_namespace: 'default'
openshift_storage_glusterfs_is_native: True
-openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
+openshift_storage_glusterfs_name: 'storage'
+openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
+openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_wipe: False
@@ -11,8 +13,8 @@ openshift_storage_glusterfs_heketi_is_missing: True
openshift_storage_glusterfs_heketi_deploy_is_missing: True
openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
openshift_storage_glusterfs_heketi_version: 'latest'
-openshift_storage_glusterfs_heketi_admin_key: ''
-openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
openshift_storage_glusterfs_heketi_topology_load: True
openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
openshift_storage_glusterfs_heketi_url: "{{ omit }}"
@@ -20,7 +22,9 @@ openshift_storage_glusterfs_heketi_url: "{{ omit }}"
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
-openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_name: 'registry'
+openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
+openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
@@ -29,8 +33,8 @@ openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_gl
openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
-openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
-openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ 32 | oo_generate_secret }}"
openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
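
One subtlety of these defaults: Ansible templates them lazily, so each reference to a `{{ 32 | oo_generate_secret }}` default re-runs the filter and can yield a fresh value. Tasks that need one stable key per run capture it with set_fact, as the glusterfs_config.yml entry point below does; the shape of that capture, as a sketch:

    - set_fact:
        glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
        glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
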
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index c9945be13..81b4fa5dc 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -9,49 +9,47 @@ metadata:
annotations:
description: Bootstrap Heketi installation
tags: glusterfs,heketi,installation
-labels:
- template: deploy-heketi
objects:
- kind: Service
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-service
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
deploy-heketi: support
annotations:
description: Exposes Heketi service
spec:
ports:
- - name: deploy-heketi
+ - name: deploy-heketi-${CLUSTER_NAME}
port: 8080
targetPort: 8080
selector:
- name: deploy-heketi
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
- kind: Route
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-route
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
deploy-heketi: support
spec:
to:
kind: Service
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-dc
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
deploy-heketi: support
annotations:
description: Defines how to deploy Heketi
spec:
replicas: 1
selector:
- name: deploy-heketi
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
triggers:
- type: ConfigChange
strategy:
@@ -60,13 +58,12 @@ objects:
metadata:
name: deploy-heketi
labels:
- name: deploy-heketi
- glusterfs: deploy-heketi-pod
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
deploy-heketi: support
spec:
- serviceAccountName: heketi-service-account
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
- - name: deploy-heketi
+ - name: heketi
image: ${IMAGE_NAME}:${IMAGE_VERSION}
env:
- name: HEKETI_USER_KEY
@@ -81,11 +78,15 @@ objects:
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
- name: db
mountPath: /var/lib/heketi
+ - name: topology
+ mountPath: ${TOPOLOGY_PATH}
readinessProbe:
timeoutSeconds: 3
initialDelaySeconds: 3
@@ -100,6 +101,9 @@ objects:
port: 8080
volumes:
- name: db
+ - name: topology
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-topology-secret
parameters:
- name: HEKETI_USER_KEY
displayName: Heketi User Secret
@@ -107,9 +111,19 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
- name: IMAGE_NAME
- displayName: GlusterFS container name
+ displayName: heketi container name
required: True
- name: IMAGE_VERSION
- displayName: GlusterFS container versiona
+ displayName: heketi container version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: glusterfs
+- name: TOPOLOGY_PATH
+ displayName: heketi topology file location
required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
index c66705752..dc3d2250a 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -12,24 +12,24 @@ objects:
- kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
- name: glusterfs
+ name: glusterfs-${CLUSTER_NAME}
labels:
- glusterfs: daemonset
+ glusterfs: ${CLUSTER_NAME}-daemonset
annotations:
description: GlusterFS DaemonSet
tags: glusterfs
spec:
selector:
matchLabels:
- glusterfs-node: pod
+ glusterfs: ${CLUSTER_NAME}-pod
template:
metadata:
- name: glusterfs
+ name: glusterfs-${CLUSTER_NAME}
labels:
+ glusterfs: ${CLUSTER_NAME}-pod
glusterfs-node: pod
spec:
- nodeSelector:
- storagenode: glusterfs
+ nodeSelector: "${{NODE_LABELS}}"
hostNetwork: true
containers:
- name: glusterfs
@@ -63,26 +63,26 @@ objects:
privileged: true
readinessProbe:
timeoutSeconds: 3
- initialDelaySeconds: 100
+ initialDelaySeconds: 40
exec:
command:
- "/bin/bash"
- "-c"
- systemctl status glusterd.service
- periodSeconds: 10
+ periodSeconds: 25
successThreshold: 1
- failureThreshold: 3
+ failureThreshold: 15
livenessProbe:
timeoutSeconds: 3
- initialDelaySeconds: 100
+ initialDelaySeconds: 40
exec:
command:
- "/bin/bash"
- "-c"
- systemctl status glusterd.service
- periodSeconds: 10
+ periodSeconds: 25
successThreshold: 1
- failureThreshold: 3
+ failureThreshold: 15
resources: {}
terminationMessagePath: "/dev/termination-log"
volumes:
@@ -120,9 +120,16 @@ objects:
dnsPolicy: ClusterFirst
securityContext: {}
parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
- name: IMAGE_NAME
displayName: GlusterFS container name
required: True
- name: IMAGE_VERSION
displayName: GlusterFS container version
required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
index df045c170..1d8f1abdf 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -8,15 +8,13 @@ metadata:
annotations:
description: Heketi service deployment template
tags: glusterfs,heketi
-labels:
- template: heketi
objects:
- kind: Service
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-service
+ glusterfs: heketi-${CLUSTER_NAME}-service
annotations:
description: Exposes Heketi service
spec:
@@ -25,40 +23,40 @@ objects:
port: 8080
targetPort: 8080
selector:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
- kind: Route
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-route
+ glusterfs: heketi-${CLUSTER_NAME}-route
spec:
to:
kind: Service
- name: heketi
+ name: heketi-${CLUSTER_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-dc
+ glusterfs: heketi-${CLUSTER_NAME}-dc
annotations:
description: Defines how to deploy Heketi
spec:
replicas: 1
selector:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
triggers:
- type: ConfigChange
strategy:
type: Recreate
template:
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
spec:
- serviceAccountName: heketi-service-account
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
- name: heketi
image: ${IMAGE_NAME}:${IMAGE_VERSION}
@@ -76,6 +74,8 @@ objects:
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
@@ -96,7 +96,7 @@ objects:
volumes:
- name: db
glusterfs:
- endpoints: heketi-storage-endpoints
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
path: heketidbstorage
parameters:
- name: HEKETI_USER_KEY
@@ -105,9 +105,16 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
- name: IMAGE_NAME
- displayName: GlusterFS container name
+ displayName: heketi container name
required: True
- name: IMAGE_VERSION
- displayName: GlusterFS container versiona
+ displayName: heketi container versiona
required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index fa5fa2cb0..829c1f51b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -5,12 +5,6 @@
name: "{{ glusterfs_namespace }}"
when: glusterfs_is_native or glusterfs_heketi_is_native
-- include: glusterfs_deploy.yml
- when: glusterfs_is_native
-
-- name: Make sure heketi-client is installed
- package: name=heketi-client state=present
-
- name: Delete pre-existing heketi resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
@@ -21,12 +15,18 @@
with_items:
- kind: "template,route,service,dc,jobs,secret"
selector: "deploy-heketi"
- - kind: "template,route,service,dc"
- name: "heketi"
- - kind: "svc,ep"
+ - kind: "svc"
name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ - kind: "template,route,service,dc"
+ name: "heketi-{{ glusterfs_name }}"
+ - kind: "svc"
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
- kind: "sa"
- name: "heketi-service-account"
+ name: "heketi-{{ glusterfs_name }}-service-account"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-user-secret"
failed_when: False
when: glusterfs_heketi_wipe
@@ -35,11 +35,11 @@
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=deploy-heketi-pod"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until: "heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
- name: Wait for heketi pods to terminate
@@ -47,23 +47,26 @@
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until: "heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
+- include: glusterfs_deploy.yml
+ when: glusterfs_is_native
+
- name: Create heketi service account
oc_serviceaccount:
namespace: "{{ glusterfs_namespace }}"
- name: heketi-service-account
+ name: "heketi-{{ glusterfs_name }}-service-account"
state: present
when: glusterfs_heketi_is_native
- name: Add heketi service account to privileged SCC
oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: scc
resource_name: privileged
state: present
@@ -71,7 +74,7 @@
- name: Allow heketi service account to view/edit pods
oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: role
resource_name: edit
state: present
@@ -82,7 +85,7 @@
namespace: "{{ glusterfs_namespace }}"
state: list
kind: pod
- selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
when: glusterfs_heketi_is_native
@@ -100,7 +103,7 @@
namespace: "{{ glusterfs_namespace }}"
state: list
kind: pod
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
when: glusterfs_heketi_is_native
@@ -113,48 +116,35 @@
# heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_topology_load
+
- include: heketi_deploy_part1.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_deploy_is_missing
- glusterfs_heketi_is_missing
-- name: Determine heketi URL
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- state: list
- kind: ep
- selector: "glusterfs in (deploy-heketi-service, heketi-service)"
- register: heketi_url
- until:
- - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
- delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
- when:
- - glusterfs_heketi_is_native
- - glusterfs_heketi_url is undefined
-
- name: Set heketi URL
set_fact:
- glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ glusterfs_heketi_url: "localhost:8080"
when:
- glusterfs_heketi_is_native
- - glusterfs_heketi_url is undefined
+
+- name: Set heketi-cli command
+ set_fact:
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
-- name: Generate topology file
- template:
- src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
- dest: "{{ mktemp.stdout }}/topology.json"
- when:
- - glusterfs_heketi_topology_load
-
- name: Load heketi topology
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
register: topology_load
failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
when:
@@ -164,3 +154,29 @@
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_is_missing
+
+- name: Create heketi user secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-user-secret"
+ type: "kubernetes.io/glusterfs"
+ force: True
+ contents:
+ - path: key
+ data: "{{ glusterfs_heketi_user_key }}"
+
+- name: Generate GlusterFS StorageClass file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+
+- name: Create GlusterFS StorageClass
+ oc_obj:
+ state: present
+ kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}"
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+ when:
+ - glusterfs_storageclass
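
In native mode every heketi-cli invocation is wrapped in `oc rsh` and executed inside the heketi pod, which is why `localhost:8080` is a valid URL here. Rendered, the fact comes out along these lines (pod name and key are hypothetical):

    # native mode:
    #   oc rsh deploy-heketi-storage-1-x7k2p heketi-cli -s http://localhost:8080 --user admin --secret 'Zm9vYmFy'
    # non-native mode drops the "oc rsh <pod>" prefix and runs heketi-cli from the host.
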
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 451990240..aa303d126 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -3,7 +3,9 @@
glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}"
- glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}"
@@ -17,6 +19,6 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
- glusterfs_nodes: "{{ g_glusterfs_hosts }}"
+ glusterfs_nodes: "{{ groups.glusterfs }}"
- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 579112349..ea4dcc510 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -1,23 +1,24 @@
---
- assert:
- that: "glusterfs_nodeselector.keys() | count == 1"
- msg: Only one GlusterFS nodeselector key pair should be provided
-
-- assert:
that: "glusterfs_nodes | count >= 3"
msg: There must be at least three GlusterFS nodes specified
- name: Delete pre-existing GlusterFS resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
- kind: "template,daemonset"
- name: glusterfs
+ kind: "{{ item.kind }}"
+ name: "{{ item.name }}"
state: absent
+ with_items:
+ - kind: template
+ name: glusterfs
+ - kind: daemonset
+ name: "glusterfs-{{ glusterfs_name }}"
when: glusterfs_wipe
- name: Unlabel any existing GlusterFS nodes
oc_label:
- name: "{{ item }}"
+ name: "{{ hostvars[item].openshift.common.hostname }}"
kind: node
state: absent
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
@@ -40,11 +41,16 @@
failed_when: False
when: glusterfs_wipe
- # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+ # Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
- name: Clear GlusterFS storage device contents
- shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
delegate_to: "{{ item.item }}"
with_items: "{{ devices_info.results }}"
+ register: clear_devices
+ until:
+ - "'contains a filesystem in use' not in clear_devices.stderr"
+ delay: 1
+ retries: 30
when:
- glusterfs_wipe
- item.stdout_lines | count > 0
@@ -61,13 +67,11 @@
- name: Label GlusterFS nodes
oc_label:
- name: "{{ glusterfs_host }}"
+ name: "{{ hostvars[item].openshift.common.hostname }}"
kind: node
state: add
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ glusterfs_nodes | default([]) }}"
- loop_control:
- loop_var: glusterfs_host
- name: Copy GlusterFS DaemonSet template
copy:
@@ -78,7 +82,7 @@
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: glusterfs
+ name: "glusterfs"
state: present
files:
- "{{ mktemp.stdout }}/glusterfs-template.yml"
@@ -91,17 +95,19 @@
params:
IMAGE_NAME: "{{ glusterfs_image }}"
IMAGE_VERSION: "{{ glusterfs_version }}"
+ NODE_LABELS: "{{ glusterfs_nodeselector }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Wait for GlusterFS pods
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs-node=pod"
+ selector: "glusterfs={{ glusterfs_name }}-pod"
register: glusterfs_pods
until:
- "glusterfs_pods.results.results[0]['items'] | count > 0"
# There must be as many pods with 'Ready' status True as there are nodes expecting those pods
- "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
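
To make the wipe loop concrete: given a `pvs` output line of `/dev/sdb storage_vg`, the shell generated for that item would be (illustrative values):

    lvremove -ff storage_vg; vgremove -fy storage_vg; pvremove -fy /dev/sdb;

and the surrounding retry loop re-runs it, up to 30 times at one-second intervals, until "contains a filesystem in use" no longer appears on stderr.
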
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 392f4b65b..4c6891eeb 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -3,7 +3,9 @@
glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
- glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}"
@@ -17,21 +19,22 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
- glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}"
+ glusterfs_nodes: "{{ groups.glusterfs_registry }}"
- include: glusterfs_common.yml
- when: g_glusterfs_registry_hosts != g_glusterfs_hosts
+ when:
+ - groups.glusterfs_registry | default([]) | count > 0
+ - "'glusterfs' not in groups or groups.glusterfs_registry != groups.glusterfs"
- name: Delete pre-existing GlusterFS registry resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: "{{ item.kind }}"
- name: "{{ item.name | default(omit) }}"
- selector: "{{ item.selector | default(omit) }}"
+ name: "{{ item.name }}"
state: absent
with_items:
- - kind: "svc,ep"
- name: "glusterfs-registry-endpoints"
+ - kind: "svc"
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
failed_when: False
- name: Generate GlusterFS registry endpoints
@@ -40,8 +43,8 @@
dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
- name: Copy GlusterFS registry service
- copy:
- src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml.j2"
dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
- name: Create GlusterFS registry endpoints
@@ -49,7 +52,7 @@
namespace: "{{ glusterfs_namespace }}"
state: present
kind: endpoints
- name: glusterfs-registry-endpoints
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
files:
- "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
@@ -58,14 +61,14 @@
namespace: "{{ glusterfs_namespace }}"
state: present
kind: service
- name: glusterfs-registry-endpoints
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
files:
- "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
- name: Check if GlusterFS registry volume exists
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list"
+ command: "{{ glusterfs_heketi_client }} volume list"
register: registry_volume
- name: Create GlusterFS registry volume
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index c14fcfb15..318d34b5d 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -6,11 +6,21 @@
with_items:
- "deploy-heketi-template.yml"
-- name: Create deploy-heketi resources
+- name: Create heketi topology secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ force: True
+ files:
+ - name: topology.json
+ path: "{{ mktemp.stdout }}/topology.json"
+
+- name: Create deploy-heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: deploy-heketi
+ name: "deploy-heketi"
state: present
files:
- "{{ mktemp.stdout }}/deploy-heketi-template.yml"
@@ -25,17 +35,20 @@
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+ TOPOLOGY_PATH: "{{ mktemp.stdout }}"
- name: Wait for deploy-heketi pod
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 64410a9ab..3a9619d9d 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,8 +1,10 @@
---
- name: Create heketi DB volume
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
register: setup_storage
- failed_when: False
+
+- name: Copy heketi-storage list
+ shell: "{{ openshift.common.client_binary }} rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
@@ -28,7 +30,7 @@
# Pod's 'Complete' status must be True
- "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
failed_when:
- "'results' in heketi_job.results"
- "heketi_job.results.results | count > 0"
@@ -46,14 +48,45 @@
with_items:
- kind: "template,route,service,jobs,dc,secret"
selector: "deploy-heketi"
- failed_when: False
+ - kind: "svc"
+ name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+
+- name: Generate heketi endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Generate heketi service
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-service.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-service.yml"
+
+- name: Create heketi endpoints
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: endpoints
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Create heketi service
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: service
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-service.yml"
- name: Copy heketi template
copy:
src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
dest: "{{ mktemp.stdout }}/heketi-template.yml"
-- name: Create heketi resources
+- name: Create heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
@@ -72,38 +105,27 @@
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Wait for heketi pod
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
-
-- name: Determine heketi URL
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- state: list
- kind: ep
- selector: "glusterfs=heketi-service"
- register: heketi_url
- until:
- - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
- delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
-- name: Set heketi URL
+- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index ebd8db453..c9bfdd1cd 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -7,12 +7,11 @@
- include: glusterfs_config.yml
when:
- - g_glusterfs_hosts | default([]) | count > 0
+ - groups.glusterfs | default([]) | count > 0
- include: glusterfs_registry.yml
when:
- - g_glusterfs_registry_hosts | default([]) | count > 0
- - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
+ - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
- name: Delete temp directory
file:
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
index 605627ab5..11c9195bb 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -1,7 +1,8 @@
+---
apiVersion: v1
kind: Endpoints
metadata:
- name: glusterfs-registry-endpoints
+ name: glusterfs-{{ glusterfs_name }}-endpoints
subsets:
- addresses:
{% for node in glusterfs_nodes %}
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
index 3f8d8f507..3f869d2b7 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: glusterfs-registry-endpoints
+ name: glusterfs-{{ glusterfs_name }}-endpoints
spec:
ports:
- port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..9b8fae310
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{{ glusterfs_heketi_url }}"
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-user-secret"
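
With this StorageClass in place, an application requests a dynamically provisioned GlusterFS volume with an ordinary claim. A sketch, assuming the default cluster name of `storage` (claim name and size are hypothetical):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gluster-claim
    spec:
      storageClassName: glusterfs-storage
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
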
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
index 8542029f6..0d8162c2e 100644
--- a/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
@@ -25,6 +25,8 @@ RUN cd /root/rpmbuild/SOURCES && \
rpmbuild -bb /root/ose-3.3.spec && \
mkdir /mnt/localrepo/ose-3.{2,3} && \
cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.2-1.noarch.rpm /mnt/localrepo/ose-3.2 && \
+ cp /root/rpmbuild/RPMS/noarch/{openvswitch-2.4,docker-1.10}-1.noarch.rpm /mnt/localrepo/ose-3.2 && \
createrepo /mnt/localrepo/ose-3.2 && \
cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.3-1.noarch.rpm /mnt/localrepo/ose-3.3 && \
+ cp /root/rpmbuild/RPMS/noarch/{openvswitch-2.4,docker-1.10}-1.noarch.rpm /mnt/localrepo/ose-3.3 && \
createrepo /mnt/localrepo/ose-3.3
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
index dbc9f0c8e..3b3eab696 100644
--- a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
@@ -12,6 +12,12 @@ BuildArch: noarch
Summary: package the critical aos packages
%package node
Summary: package the critical aos packages
+%package -n openvswitch
+Summary: package the critical aos packages
+Version: 2.4
+%package -n docker
+Summary: package the critical aos packages
+Version: 1.10
%description
Package for pretending to provide AOS
@@ -22,6 +28,12 @@ Package for pretending to provide AOS
%description node
Package for pretending to provide AOS
+%description -n openvswitch
+Package for pretending to provide openvswitch
+
+%description -n docker
+Package for pretending to provide docker
+
%prep
%setup -q
@@ -37,8 +49,9 @@ mkdir -p $RPM_BUILD_ROOT
%files
%files master
%files node
-%doc
-
+%files -n openvswitch
+%files -n docker
+%doc
%changelog
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
index 9546e8430..66be0a862 100644
--- a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
@@ -12,6 +12,12 @@ BuildArch: noarch
Summary: package the critical aos packages
%package node
Summary: package the critical aos packages
+%package -n openvswitch
+Summary: package the critical aos packages
+Version: 2.4
+%package -n docker
+Summary: package the critical aos packages
+Version: 1.10
%description
Package for pretending to provide AOS
@@ -22,6 +28,12 @@ Package for pretending to provide AOS
%description node
Package for pretending to provide AOS
+%description -n openvswitch
+Package for pretending to provide openvswitch
+
+%description -n docker
+Package for pretending to provide docker
+
%prep
%setup -q
@@ -37,8 +49,9 @@ mkdir -p $RPM_BUILD_ROOT
%files
%files master
%files node
-%doc
-
+%files -n openvswitch
+%files -n docker
+%doc
%changelog
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
index 66d43d809..58bed0fc0 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
@@ -20,6 +20,10 @@
post_tasks:
- block:
+ # disable extras so we control docker version
+ - include: tasks/enable_repo.yml
+ vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
+
- action: openshift_health_check
args:
checks: [ 'package_version' ]
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
index c941413de..850a55a72 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
@@ -23,6 +23,10 @@
- include: tasks/enable_repo.yml
vars: { repo_name: "ose-3.3" }
+ # disable extras so we control docker version
+ - include: tasks/enable_repo.yml
+ vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
+
- action: openshift_health_check
args:
checks: [ 'package_version' ]
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
index a41cb3c9a..6022f4289 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
@@ -3,7 +3,7 @@
# believe it or not we can't use the yum_repository module for this.
# https://github.com/ansible/ansible-modules-extras/issues/2384
ini_file:
- dest: /etc/yum.repos.d/{{ repo_name }}.repo
+ dest: /etc/yum.repos.d/{{ repo_file | default(repo_name) }}.repo
section: "{{ repo_name }}"
option: enabled
value: "{{ repo_enabled | default(1) }}"
diff --git a/test/integration/openshift_health_checker/preflight/preflight_test.go b/test/integration/openshift_health_checker/preflight/preflight_test.go
index 05ddf139f..9dfd713ec 100644
--- a/test/integration/openshift_health_checker/preflight/preflight_test.go
+++ b/test/integration/openshift_health_checker/preflight/preflight_test.go
@@ -66,7 +66,7 @@ func TestPackageVersionMismatches(t *testing.T) {
ExitCode: 2,
Output: []string{
"check \"package_version\":",
- "Not all of the required packages are available at requested version",
+ "Not all of the required packages are available at their requested version",
},
}.Run(t)
}