Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/bootstrap-fedora.yml | 3
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 5
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 8
-rw-r--r--  playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 16
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 9
-rw-r--r--  playbooks/byo/openshift-cluster/enable_dnsmasq.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-certificates.yml | 22
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 83
l---------  playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md | 21
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 28
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 28
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md | 17
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 32
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_2/README.md (renamed from playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md) | 6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml (renamed from playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml) | 20
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/README.md (renamed from playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md) | 6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 67
-rw-r--r--  playbooks/byo/openshift_facts.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 46
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 245
l---------  playbooks/common/openshift-cluster/upgrades/atomic-openshift-master.j2 | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml) | 4
l---------  playbooks/common/openshift-cluster/upgrades/docker-cluster | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml | 62
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 51
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh (renamed from playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh) | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh | 22
l---------  playbooks/common/openshift-cluster/upgrades/master_docker | 1
l---------  playbooks/common/openshift-cluster/upgrades/native-cluster | 1
l---------  playbooks/common/openshift-cluster/upgrades/openshift.docker.node.dep.service | 1
l---------  playbooks/common/openshift-cluster/upgrades/openshift.docker.node.service | 1
l---------  playbooks/common/openshift-cluster/upgrades/openvswitch.docker.service | 1
l---------  playbooks/common/openshift-cluster/upgrades/openvswitch.sysconfig.j2 | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml) | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml) | 178
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml) | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml) | 195
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 114
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 646
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml | 58
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml | 88
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 140
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml | 14
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml | 24
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 40
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/verify_ansible_version.yml | 10
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 117
-rw-r--r--  playbooks/common/openshift-master/config.yml | 271
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 9
-rw-r--r--  playbooks/common/openshift-node/config.yml | 210
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 5
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 8
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 36
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 29
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 26
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml | 26
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/storage-pool.xml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 16
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 12
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 7
-rw-r--r--  playbooks/openstack/openshift-cluster/dns.yml | 5
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 41
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 43
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 2
98 files changed, 1161 insertions, 2179 deletions
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
index b380a74d6..b370d7fba 100644
--- a/playbooks/adhoc/bootstrap-fedora.yml
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -1,4 +1,5 @@
- hosts: OSEv3
+ gather_facts: false
tasks:
- name: install python and deps for ansible modules
- raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python python2-firewall
+ raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python python2-firewall pyOpenSSL python-cryptography
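A note on the bootstrap change above: a fresh Fedora host ships without python2, so regular Ansible modules and fact gathering (hence the added gather_facts: false) cannot run until Python is bootstrapped over plain SSH with the raw module; pyOpenSSL and python-cryptography are presumably needed by the certificate modules used elsewhere in this diff. A minimal sketch of the same pattern, with the group name taken from the playbook:

- hosts: OSEv3
  gather_facts: false
  tasks:
    # raw runs over SSH without needing a Python interpreter on the target
    - name: Bootstrap Python so regular modules can run
      raw: dnf install -y python2 python2-dnf
    # Once Python exists, facts can be gathered explicitly
    - setup: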
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
index 72fcd77b3..1438fd7d5 100755
--- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -16,7 +16,7 @@
# * You may need to re-deploy docker images after this is run (like monitoring)
- name: Fix docker to have a provisioned iops drive
- hosts: "{{ cli_name }}"
+ hosts: "{{ cli_host }}"
user: root
connection: ssh
gather_facts: no
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 4edd44fe4..3be3a0e96 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -338,7 +338,10 @@
- /etc/ansible/facts.d/openshift.fact
- /etc/etcd
- /etc/systemd/system/etcd_container.service
- - /var/lib/etcd
+
+ - name: Remove etcd data
+ shell: rm -rf /var/lib/etcd/*
+ failed_when: false
- hosts: lb
become: yes
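The uninstall change above clears /var/lib/etcd/* rather than deleting /var/lib/etcd itself, presumably so the directory survives when it is a dedicated mount. A sketch of the idiom; shell (not command) is required because the wildcard needs glob expansion:

- name: Remove etcd data
  shell: rm -rf /var/lib/etcd/*
  # Tolerate hosts where the path does not exist:
  failed_when: false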
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 4839c100b..647c72239 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,3 +1,6 @@
+---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
gather_facts: no
tasks:
@@ -6,7 +9,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
@@ -23,9 +26,8 @@
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
- openshift_registry_selector: 'type=infra'
+ openshift_hosted_registry_selector: 'type=infra'
openshift_hosted_router_selector: 'type=infra'
- openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_node_labels:
region: "{{ deployment_vars[deployment_type].region }}"
type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] if inventory_hostname in groups['tag_host-type_node'] else hostvars[inventory_hostname]['ec2_tag_host-type'] }}"
diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
deleted file mode 100644
index 44d9a3e25..000000000
--- a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# Usage:
-# ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=<deployment_type> -e cluster_id=<cluster_id>
-- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
- vars_files:
- - "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}"
- - "{{lookup('file', '../../../../aws/openshift-cluster/cluster_hosts.yml')}}"
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hostname: "{{ ec2_private_ip_address }}"
- openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index c5479d098..0b85b2485 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,17 +1,23 @@
---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
@@ -20,3 +26,4 @@
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
openshift_deployment_type: "{{ deployment_type }}"
+ openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"

diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
index 1c8d99341..0ba11a21b 100644
--- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
@@ -1,4 +1,6 @@
---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
connection: local
become: no
@@ -8,7 +10,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
new file mode 100644
index 000000000..6d1247e0f
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -0,0 +1,22 @@
+---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../../common/openshift-cluster/redeploy-certificates.yml
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
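The new byo entry point above only wires cluster_hosts.yml into the l_oo_all_hosts group and then delegates to the common redeploy-certificates.yml. Mirroring the usage lines in the upgrade READMEs, an invocation would presumably look like the following; openshift_certificates_redeploy_ca comes from the common playbook later in this diff and defaults to false:

ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/redeploy-certificates.yml -e openshift_certificates_redeploy_ca=true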
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index d7798d304..3a285ab9f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,106 +1,47 @@
-- name: Check for appropriate Docker versions for 1.9.x to 1.10.x upgrade
+- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
roles:
- openshift_facts
tasks:
+ - set_fact:
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
- fail:
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift.common.is_atomic | bool
- - name: Determine available Docker version
- script: ../../../../common/openshift-cluster/upgrades/files/rpm_versions.sh docker
- register: g_docker_version_result
-
- - name: Check if Docker is installed
- command: rpm -q docker
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
- - set_fact:
- g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
-
- - name: Set fact if docker requires an upgrade
- set_fact:
- docker_upgrade: true
- when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.10','<')
+ - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool
- - fail:
- msg: This playbook requires access to Docker 1.10 or later
- when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
# If a node fails, halt everything, the admin will need to clean up and we
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
-# and will not take any action on a node already running 1.10+.
+# and will not take any action on a node already running the requested docker version.
- name: Evacuate and upgrade nodes
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- - debug: var=docker_upgrade
-
- name: Prepare for Node evacuation
command: >
{{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
-# TODO: skip all node evac stuff for non-nodes (i.e. separate containerized etcd hosts)
- name: Evacuate Node for Kubelet upgrade
command: >
{{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
-
- - name: Stop containerized services
- service: name={{ item }} state=stopped
- with_items:
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- - etcd_container
- - openvswitch
- failed_when: false
- when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
-
- - name: Remove all containers and images
- script: files/nuke_images.sh docker
- register: nuke_images_result
- when: docker_upgrade is defined and docker_upgrade | bool
-
- - name: Upgrade Docker
- command: "{{ ansible_pkg_mgr}} update -y docker"
- register: docker_upgrade_result
- when: docker_upgrade is defined and docker_upgrade | bool
-
- - name: Restart containerized services
- service: name={{ item }} state=started
- with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- failed_when: false
- when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
- - name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
- when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_masters_to_config
+ - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
command: >
{{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift.node.schedulable | bool
- when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
new file mode 120000
index 000000000..d5d864b63
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
@@ -0,0 +1 @@
+../../../../common/openshift-cluster/upgrades/files/nuke_images.sh
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md
deleted file mode 100644
index c91a6cb96..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# v3.0 minor upgrade playbook
-**Note:** This playbook will re-run installation steps overwriting any local
-modifications. You should ensure that your inventory has been updated with any
-modifications you've made after your initial installation. If you find any items
-that cannot be configured via ansible please open an issue at
-https://github.com/openshift/openshift-ansible
-
-## Overview
-This playbook is available as a technical preview. It currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Upgrade and restart node services
- * Applies latest configuration by re-running the installation playbook
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
deleted file mode 100644
index 76bfff9b6..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
- fail:
- msg: "Unsupported ansible version: {{ ansible_version }} found."
- when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: g_all_hosts
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
- vars:
- # Do not allow adding hosts during upgrade.
- g_new_master_hosts: []
- g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
deleted file mode 100644
index c17446162..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
- fail:
- msg: "Unsupported ansible version: {{ ansible_version }} found."
- when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: g_all_hosts
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
- vars:
- # Do not allow adding hosts during upgrade.
- g_new_master_hosts: []
- g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
deleted file mode 100644
index b230835c3..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# v3.1 minor upgrade playbook
-This upgrade will preserve all locally made configuration modifications to the
-Masters and Nodes.
-
-## Overview
-This playbook is available as a technical preview. It currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Upgrade and restart node services
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
deleted file mode 100644
index 99592d85a..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
- fail:
- msg: "Unsupported ansible version: {{ ansible_version }} found."
- when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: g_all_hosts
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/evaluate_groups.yml
- vars:
- # Do not allow adding hosts during upgrade.
- g_new_master_hosts: []
- g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
-- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml
-- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
-- include: ../../../openshift-master/restart.yml
-- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/post.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_2/README.md
index eb1f481d7..30603463a 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_2/README.md
@@ -1,10 +1,12 @@
-# v3.0 to v3.1 upgrade playbook
+# v3.2 Major and Minor Upgrade Playbook
## Overview
This playbook currently performs the
following steps.
* Upgrade and restart master services
+ * Unschedule node.
+ * Upgrade and restart docker
* Upgrade and restart node services
* Modifies the subset of the configuration necessary
* Applies the latest cluster policies
@@ -13,4 +15,4 @@ following steps.
* Updates image streams and quickstarts
## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
index 24617620b..5d549eee7 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
@@ -1,13 +1,11 @@
---
+- include: ../../../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
connection: local
become: no
gather_facts: no
tasks:
- - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
- fail:
- msg: "Unsupported ansible version: {{ ansible_version }} found."
- when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
- include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
- add_host:
name: "{{ item }}"
@@ -49,11 +47,19 @@
openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
when: openshift_docker_log_options is not defined
-- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+ openshift_upgrade_min: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre.yml
vars:
openshift_deployment_type: "{{ deployment_type }}"
-- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
vars:
openshift_deployment_type: "{{ deployment_type }}"
- include: ../../../openshift-master/restart.yml
-- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
+- include: ../../../../common/openshift-cluster/upgrades/post.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md
index 62577c3df..6892f6324 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md
@@ -1,10 +1,12 @@
-# v3.1 to v3.2 upgrade playbook
+# v3.3 Major and Minor Upgrade Playbook
## Overview
This playbook currently performs the
following steps.
* Upgrade and restart master services
+ * Unschedule node.
+ * Upgrade and restart docker
* Upgrade and restart node services
* Modifies the subset of the configuration necessary
* Applies the latest cluster policies
@@ -13,4 +15,4 @@ following steps.
* Updates image streams and quickstarts
## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
new file mode 100644
index 000000000..e740b12c0
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -0,0 +1,67 @@
+---
+- include: ../../../../common/openshift-cluster/verify_ansible_version.yml
+
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts | default([])
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Set oo_options
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
+
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre.yml
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+ node_config_hook: "v3_3/node_config_upgrade.yml"
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/post.yml
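The master_config_hook and node_config_hook variables above let a version-specific task file run mid-upgrade against each host's config; the v3_3 hook files appear in the diffstat but their contents are outside this diff. A hypothetical hook task for illustration only, assuming the modify_yaml module this repository ships:

# Hypothetical hook content; key and value are made up for the example:
- name: Migrate a master-config.yaml setting during upgrade
  modify_yaml:
    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
    yaml_key: servingInfo.minTLSVersion
    yaml_value: VersionTLS12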
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index db8703db6..8c0708df0 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,4 +1,6 @@
---
+- include: ../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
connection: local
become: no
@@ -8,7 +10,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index a34322754..26b31d313 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -1,11 +1,3 @@
-- name: Configure flannel
- hosts: oo_first_master
- vars:
- etcd_urls: "{{ openshift.master.etcd_urls }}"
- roles:
- - role: flannel_register
- when: openshift.common.use_flannel | bool
-
- name: Additional master configuration
hosts: oo_first_master
vars:
@@ -19,14 +11,10 @@
- role: openshift_examples
registry_url: "{{ openshift.master.registry_url }}"
when: openshift.common.install_examples | bool
- - role: openshift_cluster_metrics
- when: openshift.common.use_cluster_metrics | bool
- role: openshift_manageiq
when: openshift.common.use_manageiq | bool
- role: cockpit
when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
- (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+ (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
- role: flannel_register
when: openshift.common.use_flannel | bool
-
-
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 5fec11541..d6a99fcda 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,12 +1,22 @@
---
- include: evaluate_groups.yml
+ tags:
+ - always
- include: initialize_facts.yml
+ tags:
+ - always
- include: validate_hostnames.yml
+ tags:
+ - node
+
+- include: initialize_openshift_version.yml
- name: Set oo_options
hosts: oo_all_hosts
+ tags:
+ - always
tasks:
- set_fact:
openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
@@ -28,15 +38,29 @@
when: openshift_docker_log_options is not defined
- include: ../openshift-etcd/config.yml
+ tags:
+ - etcd
- include: ../openshift-nfs/config.yml
+ tags:
+ - nfs
- include: ../openshift-loadbalancer/config.yml
+ tags:
+ - loadbalancer
- include: ../openshift-master/config.yml
+ tags:
+ - master
- include: additional_config.yml
+ tags:
+ - master
- include: ../openshift-node/config.yml
+ tags:
+ - node
- include: openshift_hosted.yml
+ tags:
+ - hosted
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index f2bcc872f..4cfe8617e 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -8,11 +8,12 @@
post_tasks:
- fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1"
when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool
-
+
- name: Reconfigure masters to listen on our new dns_port
hosts: oo_masters_to_config
handlers:
- include: ../../../roles/openshift_master/handlers/main.yml
+ static: yes
vars:
os_firewall_allow:
- service: skydns tcp
@@ -43,6 +44,7 @@
hosts: oo_nodes_to_config
handlers:
- include: ../../../roles/openshift_node/handlers/main.yml
+ static: yes
pre_tasks:
- openshift_facts:
role: "{{ item.role }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c5273b08f..b3e02fb97 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -35,7 +35,7 @@
groups: oo_all_hosts
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: g_all_hosts | default([])
+ with_items: "{{ g_all_hosts | default([]) }}"
- name: Evaluate oo_masters
add_host:
@@ -77,7 +77,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
- when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
+ when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
- name: Evaluate oo_first_etcd
add_host:
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 37f523246..04dde632b 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -9,3 +9,5 @@
role: common
local_facts:
hostname: "{{ openshift_hostname | default(None) }}"
+ - set_fact:
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
new file mode 100644
index 000000000..7112a6084
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -0,0 +1,16 @@
+---
+# NOTE: requires openshift_facts be run
+- name: Determine openshift_version to configure on first master
+ hosts: oo_first_master
+ roles:
+ - openshift_version
+
+# NOTE: We set this even on etcd hosts as they may also later run as masters,
+# and we don't want to install wrong version of docker and have to downgrade
+# later.
+- name: Set openshift_version for all hosts
+ hosts: oo_all_hosts:!oo_first_master
+ vars:
+ openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+ roles:
+ - openshift_version
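initialize_openshift_version.yml relies on facts set in one play persisting in hostvars: the openshift_version role computes the version once on the first master, and every other host then pins itself to that value. A reduced sketch of the propagation pattern:

- hosts: oo_all_hosts:!oo_first_master
  vars:
    # Read the value the first play computed on the first master
    openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
  tasks:
    - debug:
        msg: "Pinning this host to {{ openshift_version }}"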
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 811b3d685..f65b7a2cd 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,13 +1,38 @@
-- name: Create persistent volumes and create hosted services
+---
+- name: Create persistent volumes
hosts: oo_first_master
+ tags:
+ - hosted
vars:
- attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}"
- deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}"
persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
roles:
- role: openshift_persistent_volumes
when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+
+- name: Create Hosted Resources
+ hosts: oo_first_master
+ tags:
+ - hosted
+ pre_tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ roles:
+ - role: openshift_cli
+ - role: openshift_hosted_facts
+ - role: openshift_projects
+ # TODO: Move standard project definitions to openshift_hosted/vars/main.yml
+ # Vars are not accessible in meta/main.yml in ansible-1.9.x
+ openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
+ - role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - hostnetwork
+ when: openshift.common.version_gte_3_2_or_1_2
- role: openshift_serviceaccounts
openshift_serviceaccounts_names:
- router
@@ -15,16 +40,9 @@
openshift_serviceaccounts_namespace: default
openshift_serviceaccounts_sccs:
- privileged
- - role: openshift_registry
- registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
- when: deploy_infra | bool and attach_registry_volume | bool
+ when: not openshift.common.version_gte_3_2_or_1_2
+ - role: openshift_hosted
- role: openshift_metrics
when: openshift.hosted.metrics.deploy | bool
-
-- name: Create Hosted Resources
- hosts: oo_first_master
- pre_tasks:
- - set_fact:
- openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- roles:
- - role: openshift_hosted
+ - role: cockpit-ui
+ when: ( openshift.common.deployment_subtype == 'registry' )
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
new file mode 100644
index 000000000..b97906072
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -0,0 +1,245 @@
+---
+- include: evaluate_groups.yml
+
+- include: initialize_facts.yml
+
+- include: initialize_openshift_version.yml
+
+- name: Load openshift_facts
+ hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
+ roles:
+ - openshift_facts
+
+- name: Redeploy etcd certificates
+ hosts: oo_etcd_to_config
+ any_errors_fatal: true
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_conf_dir: /etc/etcd
+ etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
+
+ pre_tasks:
+ - stat:
+ path: "{{ etcd_generated_certs_dir }}"
+ register: etcd_generated_certs_dir_stat
+ - name: Backup etcd certificates
+ command: >
+ tar -czvf /etc/etcd/etcd-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_conf_dir }}/ca.crt
+ {{ etcd_conf_dir }}/ca
+ {{ etcd_generated_certs_dir }}
+ when: etcd_generated_certs_dir_stat.stat.exists
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Remove existing etcd certificates
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ etcd_conf_dir }}/ca.crt"
+ - "{{ etcd_conf_dir }}/ca"
+ - "{{ etcd_generated_certs_dir }}"
+ roles:
+ - role: openshift_etcd_server_certificates
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_certificates_redeploy: true
+
+- name: Redeploy master certificates
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ vars:
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
+ pre_tasks:
+ - stat:
+ path: "{{ openshift_generated_configs_dir }}"
+ register: openshift_generated_configs_dir_stat
+ - name: Backup generated certificate and config directories
+ command: >
+ tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ openshift_generated_configs_dir }}
+ {{ openshift.common.config_base }}/master
+ when: openshift_generated_configs_dir_stat.stat.exists
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+ - name: Remove generated certificate directories
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ openshift_generated_configs_dir }}"
+ - name: Remove generated certificates
+ file:
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
+ state: absent
+ with_items:
+ - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false) }}"
+ - "etcd.server.crt"
+ - "etcd.server.key"
+ - "master.etcd-client.crt"
+ - "master.etcd-client.key"
+ - "master.server.crt"
+ - "master.server.key"
+ - "openshift-master.crt"
+ - "openshift-master.key"
+ - "openshift-master.kubeconfig"
+ - name: Remove CA certificate
+ file:
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
+ state: absent
+ when: openshift_certificates_redeploy_ca | default(false) | bool
+ with_items:
+ - "ca.crt"
+ - "ca.key"
+ - "ca.serial.txt"
+ - "ca-bundle.crt"
+ roles:
+ - role: openshift_master_certificates
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_hostnames: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'] | default([]))
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
+ openshift_certificates_redeploy: true
+ - role: openshift_etcd_client_certificates
+ etcd_certificates_redeploy: true
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+
+- name: Redeploy node certificates
+ hosts: oo_nodes_to_config
+ any_errors_fatal: true
+ pre_tasks:
+ - name: Remove CA certificate
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ openshift.common.config_base }}/node/ca.crt"
+ roles:
+ - role: openshift_node_certificates
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_certificates_redeploy: true
+
+- name: Restart etcd
+ hosts: oo_etcd_to_config
+ tasks:
+ - name: restart etcd
+ service: name=etcd state=restarted
+
+- name: Stop master services
+ hosts: oo_masters_to_config
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: stop master
+ service: name={{ openshift.common.service_type }}-master state=stopped
+ when: not openshift_master_ha | bool
+ - name: stop master api
+ service: name={{ openshift.common.service_type }}-master-api state=stopped
+ when: openshift_master_ha | bool and openshift_master_cluster_method == 'native'
+ - name: stop master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=stopped
+ when: openshift_master_ha | bool and openshift_master_cluster_method == 'native'
+
+- name: Start master services
+ hosts: oo_masters_to_config
+ serial: 1
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: start master
+ service: name={{ openshift.common.service_type }}-master state=started
+ when: not openshift_master_ha | bool
+ - name: start master api
+ service: name={{ openshift.common.service_type }}-master-api state=started
+ when: openshift_master_ha | bool and openshift_master_cluster_method == 'native'
+ - name: start master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=started
+ when: openshift_master_ha | bool and openshift_master_cluster_method == 'native'
+
+- name: Restart masters (pacemaker)
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: restart master
+ command: pcs resource restart master
+ when: openshift_master_ha | bool and openshift_master_cluster_method == 'pacemaker'
+
+- name: Restart nodes
+ hosts: oo_nodes_to_config
+ tasks:
+ - name: restart node
+ service: name={{ openshift.common.service_type }}-node state=restarted
+
+- name: Copy admin client config(s)
+ hosts: oo_first_master
+ tasks:
+ - name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+ - name: Copy admin client config(s)
+ command: >
cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+
+- name: Serially evacuate all nodes to trigger redeployments
+ hosts: oo_nodes_to_config
+ serial: 1
+ any_errors_fatal: true
+ tasks:
+ - name: Determine if node is currently schedulable
+ command: >
+ {{ openshift.common.client_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ get node {{ openshift.common.hostname | lower }} -o json
+ register: node_output
+ when: openshift_certificates_redeploy_ca | default(false) | bool
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ changed_when: false
+
+ - set_fact:
+ was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
+ when: openshift_certificates_redeploy_ca | default(false) | bool
+
+ - name: Prepare for node evacuation
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ manage-node {{ openshift.common.hostname | lower }}
+ --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
+
+ - name: Evacuate node
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ manage-node {{ openshift.common.hostname | lower }}
+ --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
+
+- name: Delete temporary directory
+ hosts: oo_first_master
+ tasks:
+ - name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/playbooks/common/openshift-cluster/upgrades/atomic-openshift-master.j2 b/playbooks/common/openshift-cluster/upgrades/atomic-openshift-master.j2
new file mode 120000
index 000000000..2441f8887
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/atomic-openshift-master.j2
@@ -0,0 +1 @@
+../../../../roles/openshift_master/templates/atomic-openshift-master.j2
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
index 319758a06..32a3636aa 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
@@ -1,7 +1,7 @@
-- include_vars: ../../../../../roles/openshift_node/vars/main.yml
+- include_vars: ../../../../roles/openshift_node/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
- name: Verifying the correct version was configured
shell: grep {{ verify_upgrade_version }} {{ item }}
diff --git a/playbooks/common/openshift-cluster/upgrades/docker-cluster b/playbooks/common/openshift-cluster/upgrades/docker-cluster
new file mode 120000
index 000000000..055ad09fc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker-cluster
@@ -0,0 +1 @@
+../../../../roles/openshift_master/templates/docker-cluster
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
new file mode 100644
index 000000000..03e7b844c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -0,0 +1,62 @@
+---
+# We need docker service up to remove all the images, but these services will keep
+# trying to re-start and thus re-pull the images we're trying to delete.
+- name: Stop containerized services
+ service: name={{ item }} state=stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ - etcd_container
+ - openvswitch
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+
+- debug: var=docker_image_count.stdout
+
+- name: Remove all containers and images
+ script: nuke_images.sh docker
+ register: nuke_images_result
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- service: name=docker state=stopped
+
+- name: Upgrade Docker
+ action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version }} state=present"
+
+- service: name=docker state=started
+
+- name: Restart containerized services
+ service: name={{ item }} state=started
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: inventory_hostname in groups.oo_masters_to_config
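Two idioms in the new upgrade.yml are worth noting: the Upgrade Docker task uses action: with a variable module name so a single task works under both yum and dnf, and the final wait_for runs from the control host (local_action) until the master API port answers again. A sketch of the dynamic package-manager idiom, with the pinned version assumed for the example:

- name: Install a pinned docker version via whichever package manager is present
  action: "{{ ansible_pkg_mgr }} name=docker-1.10.3 state=present"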
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
new file mode 100644
index 000000000..06b3e244f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -0,0 +1,51 @@
+---
+
+# This snippet determines if a Docker upgrade is required by checking the inventory
+# variables, the available packages, and sets l_docker_upgrade to True if so.
+
+- set_fact:
+ docker_upgrade: True
+ when: docker_upgrade is not defined
+
+- name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Get current version of Docker
+ command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
+ register: curr_docker_version
+ changed_when: false
+
+- name: Get latest available version of Docker
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "docker"
+ register: avail_docker_version
+ failed_when: false
+ changed_when: false
+
+- fail:
+ msg: This playbook requires access to Docker 1.10 or later
+ # Disable the 1.10 requirement if the user set a specific Docker version
+ when: avail_docker_version.stdout | version_compare('1.10','<') and docker_version is not defined
+
+# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
+- set_fact:
+ l_docker_upgrade: False
+
+# Make sure a docker_version is set if none was requested:
+- set_fact:
+ docker_version: "{{ avail_docker_version.stdout }}"
+ when: docker_version is not defined
+
+- name: Flag for Docker upgrade if necessary
+ set_fact:
+ l_docker_upgrade: True
+ when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+
+- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
+ set_fact:
+ docker_upgrade_nuke_images: True
+ when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
+
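To make the flag logic concrete: with a hypothetical installed version of 1.9.1 and a target docker_version of 1.10.3, the version_compare tests above resolve as sketched below (illustrative values only):

    # l_docker_upgrade:  '1.9.1' | version_compare('1.10.3', '<')  -> True
    # docker_upgrade_nuke_images (crossing the 1.10 boundary):
    #   '1.9.1'  | version_compare('1.10', '<')   -> True
    #   '1.10.3' | version_compare('1.10', '>=')  -> True
    - debug:
        msg: "Docker upgrade needed: {{ '1.9.1' | version_compare('1.10.3', '<') }}"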
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
index 6b155f7fa..8635eab0d 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
+++ b/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
@@ -15,9 +15,11 @@ then
fi
# Delete all images (forcefully)
-image_ids=`docker images -q`
+image_ids=`docker images -aq`
if test -n "$image_ids"
then
- # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144
- docker rmi $(docker images | grep "$2/\|/$2 \| $2 \|$2 \|$2-\|$2_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$2\" left to purge."
+ # Some layers are deleted recursively and are no longer present
+ # when docker goes to remove them:
+ docker rmi -f `docker images -aq` || true
fi
+
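Operators can also opt in to this purge explicitly; a hypothetical inventory snippet (upgrade_check.yml otherwise sets the flag automatically when crossing the 1.10 boundary):

    # inventory or group_vars (illustrative):
    docker_upgrade_nuke_images: true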
diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
deleted file mode 100644
index 9bbeff660..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# Here we don't really care if this is a master, api, controller or node image.
-# We just need to know the version of one of them.
-unit_file=$(ls /etc/systemd/system/${1}*.service | grep -v node-dep | head -n1)
-
-if [ ${1} == "origin" ]; then
- image_name="openshift/origin"
-elif grep aep $unit_file 2>&1 > /dev/null; then
- image_name="aep3/node"
-elif grep openshift3 $unit_file 2>&1 > /dev/null; then
- image_name="openshift3/node"
-fi
-
-installed=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
-
-docker pull ${image_name} 2>&1 > /dev/null
-available=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
-
-echo "---"
-echo "curr_version: ${installed}"
-echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker
new file mode 120000
index 000000000..6aeca2842
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/master_docker
@@ -0,0 +1 @@
+../../../../roles/openshift_master/templates/master_docker \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/native-cluster b/playbooks/common/openshift-cluster/upgrades/native-cluster
new file mode 120000
index 000000000..4af88e666
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/native-cluster
@@ -0,0 +1 @@
+../../../../roles/openshift_master/templates/native-cluster \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.dep.service b/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.dep.service
new file mode 120000
index 000000000..add8b7fa9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.dep.service
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openshift.docker.node.dep.service \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.service b/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.service
new file mode 120000
index 000000000..ed181633d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.service
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openshift.docker.node.service \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch.docker.service b/playbooks/common/openshift-cluster/upgrades/openvswitch.docker.service
new file mode 120000
index 000000000..c21e895f2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openvswitch.docker.service
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openvswitch.docker.service \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch.sysconfig.j2 b/playbooks/common/openshift-cluster/upgrades/openvswitch.sysconfig.j2
new file mode 120000
index 000000000..ead6904c4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openvswitch.sysconfig.j2
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openvswitch.sysconfig.j2 \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/post.yml
index c16965a35..e43954453 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post.yml
@@ -6,8 +6,8 @@
hosts: oo_first_master
vars:
openshift_deployment_type: "{{ deployment_type }}"
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
roles:
- openshift_manageiq
@@ -57,3 +57,16 @@
'{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
--api-version=v1
+# Check for warnings to be printed at the end of the upgrade:
+- name: Check for warnings
+ hosts: oo_masters_to_config
+ tasks:
+ # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
+ - command: >
+ grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
+ register: grep_plugin_order_override
+ when: openshift.common.version_gte_3_3_or_1_3 | bool
+ failed_when: false
+ - name: Warn if pluginOrderOverride is in use in master-config.yaml
+ debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
+ when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0
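For context, the setting that the grep task above detects appears in master-config.yaml roughly as in the following fragment; the nesting and plugin names are illustrative assumptions, not taken from this patch:

    kubernetesMasterConfig:
      admissionConfig:
        pluginOrderOverride:
        - NamespaceLifecycle
        - OriginPodNodeEnvironment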
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
index f163cca86..42a24eaf8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre.yml
@@ -2,10 +2,12 @@
###############################################################################
# Evaluate host groups and gather facts
###############################################################################
-- name: Load openshift_facts and update repos
+
+- include: ../initialize_facts.yml
+
+- name: Update repos on all hosts
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
roles:
- - openshift_facts
- openshift_repos
- name: Set openshift_no_proxy_internal_hostnames
@@ -34,10 +36,9 @@
###############################################################################
# Pre-upgrade checks
###############################################################################
-- name: Verify upgrade can proceed
+- name: Verify upgrade can proceed on first master
hosts: oo_first_master
vars:
- target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
gather_facts: no
tasks:
@@ -53,17 +54,44 @@
https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
+ # Error out if the user has older versions specified in their inventory in any
+ # of the openshift_release, openshift_image_tag, or openshift_pkg_version
+ # variables. These must be removed or updated in order for the upgrade
+ # to proceed.
+ # TODO: Should we block if you're *over* the next major release version as well?
- fail:
msg: >
openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ target_version }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
- fail:
msg: >
openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ target_version }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
+
+- include: ../../../common/openshift-cluster/initialize_openshift_version.yml
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+ # The docker role (a dependency) should be told not to touch the installed version
+ # of docker; we handle that separately during the upgrade. (The inventory may have
+ # a docker_version defined, but we don't want to apply it until later.)
+ docker_protect_installed_version: True
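As an illustration of the checks above, hypothetical inventory values that would pass for a 3.2 upgrade target; note the leading '-' on openshift_pkg_version and the 'v' on openshift_image_tag, which the split() calls strip before comparing:

    openshift_release: "3.2"
    openshift_image_tag: "v3.2.1.9"
    openshift_pkg_version: "-3.2.1.9"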
- name: Verify master processes
hosts: oo_masters_to_config
@@ -100,6 +128,7 @@
hosts: oo_nodes_to_config
roles:
- openshift_facts
+ - openshift_docker_facts
tasks:
- name: Ensure Node is running
service:
@@ -111,19 +140,16 @@
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
- target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- upgrading: True
- handlers:
- - include: ../../../../../roles/openshift_master/handlers/main.yml
- - include: ../../../../../roles/openshift_node/handlers/main.yml
- roles:
- # We want the cli role to evaluate so that the containerized oc/oadm wrappers
- # are modified to use the correct image tag. However, this can trigger a
- # docker restart if new configuration is laid down which would immediately
- # pull the latest image and defeat the purpose of these tasks.
- - { role: openshift_cli }
pre_tasks:
+ - fail:
+ msg: OpenShift must already be installed in order to upgrade
+ when: openshift.common.version is not defined
+
+ - fail:
+ msg: The detected openshift_version does not match the requested verify_upgrade_version
+ when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+
- name: Clean package cache
command: "{{ ansible_pkg_mgr }} clean all"
when: not openshift.common.is_atomic | bool
@@ -132,93 +158,39 @@
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
when: not openshift.common.is_containerized | bool
- - name: Determine available versions
- script: ../files/rpm_versions.sh {{ g_new_service_name }}
- register: g_rpm_versions_result
- when: not openshift.common.is_containerized | bool
-
- - set_fact:
- g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Determine available versions
- script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
- register: g_containerized_versions_result
- when: openshift.common.is_containerized | bool
-
- - set_fact:
- g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
- when: openshift.common.is_containerized | bool
-
- - set_fact:
- g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
- when: openshift_pkg_version is not defined
-
- - set_fact:
- g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
- when: openshift_pkg_version is defined
-
- - set_fact:
- g_new_version: "{{ openshift_image_tag | replace('v','') }}"
- when: openshift_image_tag is defined
-
- - fail:
- msg: Verifying the correct version was found
- when: g_aos_versions.curr_version == ""
-
- - fail:
- msg: Verifying the correct version was found
- when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
-
- - include_vars: ../../../../../roles/openshift_master/vars/main.yml
- when: inventory_hostname in groups.oo_masters_to_config
-
- - name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
- when: inventory_hostname in groups.oo_masters_to_config
-
- - include_vars: ../../../../../roles/openshift_node/vars/main.yml
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
- when: inventory_hostname in groups.oo_nodes_to_config
-
- # Note: the version number is hardcoded here in hopes of catching potential
- # bugs in how g_aos_versions.curr_version is set
- - name: Verifying the correct version is installed for upgrade
- shell: grep 3.1.1.6 {{ item }}
- with_items:
- - /etc/sysconfig/openvswitch
- - /etc/sysconfig/{{ openshift.common.service_type }}*
- when: verify_upgrade_version is defined
-
- - name: Verifying the image version is used in the systemd unit
- shell: grep IMAGE_VERSION {{ item }}
- with_items:
- - /etc/systemd/system/openvswitch.service
- - /etc/systemd/system/{{ openshift.common.service_type }}*.service
+ - name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
when: openshift.common.is_containerized | bool
- - fail:
- msg: This playbook requires Origin 1.1 or later
- when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+ - name: Check latest available OpenShift RPM version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+ failed_when: false
+ changed_when: false
+ register: avail_openshift_version
+ when: not openshift.common.is_containerized | bool
- - fail:
- msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
- when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+ - name: Verify OpenShift RPMs are available for upgrade
+ fail:
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
- fail:
- msg: Upgrade packages not found
- when: openshift_image_tag is not defined and (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+ msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
tasks:
- - name: Determine available Docker
- script: ../files/rpm_versions.sh docker
- register: g_docker_version_result
- when: not openshift.common.is_atomic | bool
+ # Only check whether a docker upgrade is required if docker_upgrade is not
+ # already set to False.
+ - include: docker/upgrade_check.yml
+ when: (docker_upgrade is not defined or docker_upgrade | bool) and not openshift.common.is_atomic | bool
+
+ # Additional checks for Atomic hosts:
- name: Determine available Docker
shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
@@ -226,18 +198,12 @@
when: openshift.common.is_atomic | bool
- set_fact:
- g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
- when: not openshift.common.is_atomic | bool
-
- - set_fact:
- g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
when: openshift.common.is_atomic | bool
- fail:
- msg: This playbook requires access to Docker 1.9 or later
- when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<')
-
- # TODO: add check to upgrade ostree to get latest Docker
+ msg: This playbook requires access to Docker 1.10 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
- set_fact:
pre_upgrade_complete: True
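The Atomic host check above relies on rpm's --queryformat emitting a small YAML document that from_yaml can parse; with a hypothetical installed docker 1.10.3 the registered stdout would be:

    ---
    curr_version: 1.10.3
    avail_version: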
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 5c96ad094..f5e4d807e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,5 +1,6 @@
+# We verified that the latest available rpm is suitable, so just update the package.
- name: Upgrade packages
- command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}"
+ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
- name: Ensure python-yaml present for config upgrade
action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
index 964257af5..be4e02c4a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
@@ -3,48 +3,33 @@
# The restart playbook should be run after this playbook completes.
###############################################################################
-- name: Upgrade docker
- hosts: oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- - include: docker_upgrade.yml
- when: not openshift.common.is_atomic | bool
- - name: Set post docker install facts
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: docker
- local_facts:
- openshift_image_tag: "v{{ g_new_version }}"
- openshift_version: "{{ g_new_version }}"
-
-- name: Upgrade docker
- hosts: oo_etcd_to_config
- roles:
- - openshift_facts
- tasks:
- # Upgrade docker when host is not atomic and host is not a non-containerized etcd node
- - include: docker_upgrade.yml
- when: not openshift.common.is_atomic | bool and not ('oo_etcd_to_config' in group_names and not openshift.common.is_containerized)
-
-# The cli image is used by openshift_docker_facts to determine the currently installed
-# version. We need to explicitly pull the latest image to handle cases where
-# the locally cached 'latest' tag is older the g_new_version.
-- name: Download cli image
- hosts: oo_masters_to_config:oo_nodes_to_config
- roles:
- - { role: openshift_docker_facts }
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process, which will then remove
+# any remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
tasks:
- - name: Pull Images
- command: >
- docker pull {{ item }}:latest
- with_items:
- - "{{ openshift.common.cli_image }}"
- when: openshift.common.is_containerized | bool
+ - name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+ - debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+ - name: Remove unused Docker images for Docker 1.10+ migration
+ shell: "docker rmi `docker images -aq`"
+ # Will fail on images still in use:
+ failed_when: false
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+ - name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+ - debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
###############################################################################
# Upgrade Masters
@@ -52,17 +37,21 @@
- name: Upgrade master
hosts: oo_masters_to_config
handlers:
- - include: ../../../../../roles/openshift_master/handlers/main.yml
+ - include: ../../../../roles/openshift_master/handlers/main.yml
+ static: yes
roles:
- openshift_facts
tasks:
- include: rpm_upgrade.yml component=master
when: not openshift.common.is_containerized | bool
- - include_vars: ../../../../../roles/openshift_master/vars/main.yml
+ - include: "{{ master_config_hook }}"
+ when: master_config_hook is defined
+
+ - include_vars: ../../../../roles/openshift_master/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ include: ../../../../roles/openshift_master/tasks/systemd_units.yml
# - name: Upgrade master configuration
# openshift_upgrade_config:
@@ -71,6 +60,31 @@
# role: master
# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+ - name: Check for ca-bundle.crt
+ stat:
+ path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
+ register: ca_bundle_stat
+ failed_when: false
+
+ - name: Check for ca.crt
+ stat:
+ path: "{{ openshift.common.config_base }}/master/ca.crt"
+ register: ca_crt_stat
+ failed_when: false
+
+ - name: Migrate ca.crt to ca-bundle.crt
+ command: mv ca.crt ca-bundle.crt
+ args:
+ chdir: "{{ openshift.common.config_base }}/master"
+ when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
+
+ - name: Link ca.crt to ca-bundle.crt
+ file:
+ src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
+ path: "{{ openshift.common.config_base }}/master/ca.crt"
+ state: link
+ when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
+
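A minimal post-migration sanity check one could run (illustrative, not part of the patch): the symlink should resolve back to the bundle.

    - stat:
        path: "{{ openshift.common.config_base }}/master/ca.crt"
      register: ca_link
    - assert:
        that:
        - ca_link.stat.islnk
        - ca_link.stat.lnk_source == openshift.common.config_base + '/master/ca-bundle.crt'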
- name: Set master update status to complete
hosts: oo_masters_to_config
tasks:
@@ -96,52 +110,20 @@
when: master_update_failed | length > 0
###############################################################################
-# Upgrade Nodes
-###############################################################################
-- name: Upgrade nodes
- hosts: oo_nodes_to_config
- serial: 1
- roles:
- - openshift_facts
- handlers:
- - include: ../../../../../roles/openshift_node/handlers/main.yml
- tasks:
- - include: node_upgrade.yml
-
- - set_fact:
- node_update_complete: True
-
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- node_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_nodes_to_config)
- | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
- - set_fact:
- node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
- when: node_update_failed | length > 0
-
-###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################
- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
hosts: oo_masters_to_config
roles:
- - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" }
+ - { role: openshift_cli }
vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- upgrading: True
+ # As in pre.yml, we don't want to upgrade docker during the openshift_cli role;
+ # it will be updated when we perform the node upgrade.
+ docker_protect_installed_version: True
tasks:
- name: Verifying the correct commandline tools are available
shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
@@ -173,6 +155,57 @@
- set_fact:
reconcile_complete: True
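For reference, the reconciliation performed by this play amounts to invocations like the one below (shown in full in the removed v3_0_to_v3_1 playbook later in this diff):

    - name: Reconcile Cluster Roles
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-roles --additive-only=true --confirm
      run_once: true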
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+
+# Here we handle all tasks that might require a node evacuation (upgrading docker and the node service).
+- name: Perform upgrades that may require node evacuation
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
+ serial: 1
+ any_errors_fatal: true
+ roles:
+ - openshift_facts
+ handlers:
+ - include: ../../../../roles/openshift_node/handlers/main.yml
+ static: yes
+ tasks:
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+ # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+ - name: Mark unschedulable if host is a node
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_config
+
+ - name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_config
+
+ - include: docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+ - include: "{{ node_config_hook }}"
+ when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
+
+ - include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
+
+ - include: containerized_node_upgrade.yml
+ when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+
+
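The evacuation cycle above reduces to three oadm invocations per node, run serially around the actual upgrade work; a condensed sketch with an illustrative hostname:

    - command: oadm manage-node node1.example.com --schedulable=false
    - command: oadm manage-node node1.example.com --evacuate --force
    # ... docker and node upgrades run here ...
    - command: oadm manage-node node1.example.com --schedulable=true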
##############################################################################
# Gate on reconcile
##############################################################################
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
deleted file mode 120000
index 53bed9684..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
+++ /dev/null
@@ -1 +0,0 @@
-../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
deleted file mode 100644
index e31e7f8a3..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ /dev/null
@@ -1,114 +0,0 @@
----
-- name: Evaluate groups
- include: ../../evaluate_groups.yml
-
-- name: Re-Run cluster configuration to apply latest configuration changes
- include: ../../config.yml
-
-- name: Upgrade masters
- hosts: oo_masters_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- tasks:
- - name: Upgrade master packages
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest"
- - name: Restart master services
- service: name="{{ openshift.common.service_type}}-master" state=restarted
-
-- name: Upgrade nodes
- hosts: oo_nodes_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- tasks:
- - name: Upgrade node packages
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest"
- - name: Restart node services
- service: name="{{ openshift.common.service_type }}-node" state=restarted
-
-- name: Determine new master version
- hosts: oo_first_master
- tasks:
- - name: Determine new version
- command: >
- rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
- register: _new_version
-
-- name: Ensure AOS 3.0.2 or Origin 1.0.6
- hosts: oo_first_master
- tasks:
- - fail:
- msg: "This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
- when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
-
-- name: Update cluster policy
- hosts: oo_first_master
- tasks:
- - name: oadm policy reconcile-cluster-roles --additive-only=true --confirm
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --additive-only=true --confirm
-
-- name: Upgrade default router
- hosts: oo_first_master
- vars:
- - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
- - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- tasks:
- - name: Check for default router
- command: >
- {{ oc_cmd }} get -n default dc/router
- register: _default_router
- failed_when: false
- changed_when: false
- - name: Check for allowHostNetwork and allowHostPorts
- when: _default_router.rc == 0
- shell: >
- {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
- register: _scc
- - name: Grant allowHostNetwork and allowHostPorts
- when:
- - _default_router.rc == 0
- - "'false' in _scc.stdout"
- command: >
- {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
- - name: Update deployment config to 1.0.4/3.0.1 spec
- when: _default_router.rc == 0
- command: >
- {{ oc_cmd }} patch dc/router -p
- '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
- - name: Switch to hostNetwork=true
- when: _default_router.rc == 0
- command: >
- {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
- - name: Update router image to current version
- when: _default_router.rc == 0
- command: >
- {{ oc_cmd }} patch dc/router -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
-
-- name: Upgrade default
- hosts: oo_first_master
- vars:
- - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
- - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- tasks:
- - name: Check for default registry
- command: >
- {{ oc_cmd }} get -n default dc/docker-registry
- register: _default_registry
- failed_when: false
- changed_when: false
- - name: Update registry image to current version
- when: _default_registry.rc == 0
- command: >
- {{ oc_cmd }} patch dc/docker-registry -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
-
-- name: Update image streams and templates
- hosts: oo_first_master
- vars:
- openshift_examples_import_command: "update"
- openshift_deployment_type: "{{ deployment_type }}"
- registry_url: "{{ openshift.master.registry_url }}"
- roles:
- - openshift_examples
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
deleted file mode 120000
index 53bed9684..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
+++ /dev/null
@@ -1 +0,0 @@
-../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
deleted file mode 100644
index c3c1240d8..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ /dev/null
@@ -1,646 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-- name: Evaluate host groups
- include: ../../evaluate_groups.yml
-
-- name: Load openshift_facts
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_facts
-
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
-
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed
- hosts: oo_first_master
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
- gather_facts: no
- tasks:
- # Pacemaker is currently the only supported upgrade path for multiple masters
- - fail:
- msg: "openshift_master_cluster_method must be set to 'pacemaker'"
- when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method != "pacemaker"))
-
- - fail:
- msg: >
- This upgrade is only supported for origin, openshift-enterprise, and online
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ target_version }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-
- # If this script errors out ansible will show the default stdout/stderr
- # which contains details for the user:
- - script: ../files/pre-upgrade-check
-
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
- tasks:
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
-
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
-
- - name: Determine available versions
- script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift
- register: g_versions_result
-
- - set_fact:
- g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
-
- - set_fact:
- g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
- when: openshift_pkg_version is not defined
-
- - set_fact:
- g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
- when: openshift_pkg_version is defined
-
- - fail:
- msg: This playbook requires Origin 1.0.6 or later
- when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
-
- - fail:
- msg: Upgrade packages not found
- when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
-
- - set_fact:
- pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
- hosts: localhost
- connection: local
- become: no
- vars:
- pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
- tasks:
- - set_fact:
- pre_upgrade_completed: "{{ hostvars
- | oo_select_keys(pre_upgrade_hosts)
- | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
- - set_fact:
- pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
- when: pre_upgrade_failed | length > 0
-
-
-
-###############################################################################
-# Backup etcd
-###############################################################################
-- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ openshift.master.embedded_etcd }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- - name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
-
- - name: Generate etcd backup
- command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-##############################################################################
-# Gate on etcd backup
-##############################################################################
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
-
-
-
-###############################################################################
-# Upgrade Masters
-###############################################################################
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_master_mktemp
- changed_when: False
-
-- name: Update deployment type
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
- roles:
- - openshift_facts
-
-- name: Update master facts
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- post_tasks:
- - openshift_facts:
- role: master
- local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
-
-- name: Upgrade master packages and configuration
- hosts: oo_masters_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- roles:
- - openshift_facts
- tasks:
- - name: Upgrade to latest available kernel
- action: "{{ ansible_pkg_mgr}} name=kernel state=latest"
-
- - name: Upgrade master packages
- command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
- when: openshift_pkg_version is not defined
-
- - name: Upgrade packages
- command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
- when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
-
- - name: Ensure python-yaml present for config upgrade
- action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
- when: not openshift.common.is_atomic | bool
-
- - name: Upgrade master configuration
- openshift_upgrade_config:
- from_version: '3.0'
- to_version: '3.1'
- role: master
- config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
- - set_fact:
- openshift_master_certs_no_etcd:
- - admin.crt
- - master.kubelet-client.crt
- - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
- - master.server.crt
- - openshift-master.crt
- - openshift-registry.crt
- - openshift-router.crt
- - etcd.server.crt
- openshift_master_certs_etcd:
- - master.etcd-client.crt
-
- - set_fact:
- openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
-
- - name: Check status of master certificates
- stat:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- with_items: openshift_master_certs
- register: g_master_cert_stat_result
-
- - set_fact:
- master_certs_missing: "{{ False in (g_master_cert_stat_result.results
- | oo_collect(attribute='stat.exists')
- | list ) }}"
- master_cert_subdir: master-{{ openshift.common.hostname }}
- master_cert_config_dir: "{{ openshift.common.config_base }}/master"
-
-
-- name: Generate missing master certificates
- hosts: oo_first_master
- vars:
- master_hostnames: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
- master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
- masters_needing_certs: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
- | oo_filter_list(filter_attr='master_certs_missing') }}"
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- openshift_deployment_type: "{{ deployment_type }}"
- roles:
- - openshift_master_certificates
- post_tasks:
- - name: Remove generated etcd client certs when using external etcd
- file:
- path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
- state: absent
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- with_nested:
- - masters_needing_certs
- - - master.etcd-client.crt
- - master.etcd-client.key
-
- - name: Create a tarball of the master certs
- command: >
- tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
- -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
- with_items: masters_needing_certs
-
- - name: Retrieve the master cert tarball from the master
- fetch:
- src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: masters_needing_certs
-
-
-- name: Sync generated certs, update service config and restart master services
- hosts: oo_masters_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- openshift_deployment_type: "{{ deployment_type }}"
- tasks:
- - name: Unarchive the tarball on the master
- unarchive:
- src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
- dest: "{{ master_cert_config_dir }}"
- when: inventory_hostname != groups.oo_first_master.0
-
- - name: Restart master service
- service: name="{{ openshift.common.service_type}}-master" state=restarted
- when: not openshift_master_ha | bool
-
- - name: Ensure the master service is enabled
- service: name="{{ openshift.common.service_type}}-master" state=started enabled=yes
- when: not openshift_master_ha | bool
-
- - name: Check for configured cluster
- stat:
- path: /etc/corosync/corosync.conf
- register: corosync_conf
- when: openshift_master_ha | bool
-
- - name: Destroy cluster
- command: pcs cluster destroy --all
- when: openshift_master_ha | bool and corosync_conf.stat.exists == true
- run_once: true
-
- - name: Start pcsd
- service: name=pcsd enabled=yes state=started
- when: openshift_master_ha | bool
-
-
-- name: Re-create cluster
- hosts: oo_first_master
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- openshift_deployment_type: "{{ deployment_type }}"
- omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ') }}"
- roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool
-
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file: name={{ g_master_mktemp.stdout }} state=absent
- changed_when: False
-
-
-- name: Set master update status to complete
- hosts: oo_masters_to_config
- tasks:
- - set_fact:
- master_update_complete: True
-
-
-##############################################################################
-# Gate on master update complete
-##############################################################################
-- name: Gate on master update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- master_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- - set_fact:
- master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
- when: master_update_failed | length > 0
-
-
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-- name: Upgrade nodes
- hosts: oo_nodes_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- roles:
- - openshift_facts
- tasks:
- - name: Upgrade node packages
- command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
- when: openshift_pkg_version is not defined
-
- - name: Upgrade packages
- command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
- when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
-
- - name: Restart node service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
-
- - name: Ensure node service enabled
- service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
-
- - name: Install Ceph storage plugin dependencies
- action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
-
- - name: Install GlusterFS storage plugin dependencies
- action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
-
- - name: Set sebooleans to allow gluster storage plugin access from containers
- seboolean:
- name: "{{ item }}"
- state: yes
- persistent: yes
- when: ansible_selinux and ansible_selinux.status == "enabled"
- with_items:
- - virt_use_fusefs
- - virt_sandbox_use_fusefs
- register: sebool_result
- failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
-
- - set_fact:
- node_update_complete: True
-
-
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- node_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_nodes_to_config)
- | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
- - set_fact:
- node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
- when: node_update_failed | length > 0
-
-
-###############################################################################
-# Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
-###############################################################################
-- name: Reconcile Cluster Roles and Cluster Role Bindings
- hosts: oo_masters_to_config
- vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
- ent_reconcile_bindings: true
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- tasks:
- - name: Reconcile Cluster Roles
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --additive-only=true --confirm
- run_once: true
-
- - name: Reconcile Cluster Role Bindings
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-role-bindings
- --exclude-groups=system:authenticated
- --exclude-groups=system:authenticated:oauth
- --exclude-groups=system:unauthenticated
- --exclude-users=system:anonymous
- --additive-only=true --confirm
- when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
- run_once: true
-
- - name: Restart master services
- service: name="{{ openshift.common.service_type}}-master" state=restarted
- when: not openshift_master_ha | bool
-
- - name: Restart master cluster
- command: pcs resource restart master
- when: openshift_master_ha | bool
- run_once: true
-
- - name: Wait for the clustered master service to be available
- wait_for:
- host: "{{ openshift_master_cluster_vip }}"
- port: 8443
- state: started
- timeout: 180
- delay: 90
- when: openshift_master_ha | bool
- run_once: true
-
- - set_fact:
- reconcile_complete: True
-
-
-##############################################################################
-# Gate on reconcile
-##############################################################################
-- name: Gate on reconcile
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- reconcile_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- - set_fact:
- reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
- when: reconcile_failed | length > 0
-
-
-
-
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
-- name: Upgrade default router and default registry
- hosts: oo_first_master
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- roles:
-  # Create the new templates shipped in 3.1; existing templates are left
- # unmodified. This prevents the subsequent role definition for
- # openshift_examples from failing when trying to replace templates that do
- # not already exist. We could have potentially done a replace --force to
- # create and update in one step.
- - openshift_examples
- # Update the existing templates
- - role: openshift_examples
- openshift_examples_import_command: replace
- registry_url: "{{ openshift.master.registry_url }}"
- pre_tasks:
- - name: Collect all routers
- command: >
- {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
- register: all_routers
- failed_when: false
- changed_when: false
-
- - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
- when: all_routers.rc == 0
-
- - set_fact: haproxy_routers=[]
- when: all_routers.rc != 0
-
- - name: Check for allowHostNetwork and allowHostPorts
- when: all_routers.rc == 0
- shell: >
- {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
- register: _scc
-
- - name: Grant allowHostNetwork and allowHostPorts
- when:
- - all_routers.rc == 0
- - "'false' in _scc.stdout"
- command: >
- {{ oc_cmd }} patch scc/privileged -p
- '{"allowHostPorts":true,"allowHostNetwork":true}' --api-version=v1
-
- - name: Update deployment config to 1.0.4/3.0.1 spec
- when: all_routers.rc == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
- '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
- --api-version=v1
- with_items: haproxy_routers
-
- - name: Switch to hostNetwork=true
- when: all_routers.rc == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
- --api-version=v1
- with_items: haproxy_routers
-
-  - name: Update router image to current version
-    when: all_routers.rc == 0 and not openshift.common.version_gte_3_1_1_or_1_1_1
-    command: >
-      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
-      --api-version=v1
-    with_items: haproxy_routers
-
-  - name: Update router image to current version
-    when: all_routers.rc == 0 and openshift.common.version_gte_3_1_1_or_1_1_1
-    command: >
-      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
-      --api-version=v1
-    with_items: haproxy_routers
-
- - name: Check for default registry
- command: >
- {{ oc_cmd }} get -n default dc/docker-registry
- register: _default_registry
- failed_when: false
- changed_when: false
-
- - name: Update registry image to current version
- when: _default_registry.rc == 0
- command: >
- {{ oc_cmd }} patch dc/docker-registry -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
- --api-version=v1
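For reference, registry_image and router_image above are derived purely by string substitution on openshift.master.registry_url. With an illustrative enterprise registry URL and a g_new_version of 3.1.1 (example values, not taken from a real cluster):

    # registry_url:   registry.access.redhat.com/openshift3/ose-${component}:${version}
    # registry_image: registry.access.redhat.com/openshift3/ose-docker-registry:v3.1.1
    # router_image:   registry.access.redhat.com/openshift3/ose-haproxy-router:v3.1.1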
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
deleted file mode 120000
index 53bed9684..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
+++ /dev/null
@@ -1 +0,0 @@
-../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
deleted file mode 100644
index f030eed18..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
-- name: Upgrade default router and default registry
- hosts: oo_first_master
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- roles:
-  # Create the new templates shipped in 3.1.z; existing templates are left
- # unmodified. This prevents the subsequent role definition for
- # openshift_examples from failing when trying to replace templates that do
- # not already exist. We could have potentially done a replace --force to
- # create and update in one step.
- - openshift_examples
- # Update the existing templates
- - role: openshift_examples
- openshift_examples_import_command: replace
- registry_url: "{{ openshift.master.registry_url }}"
- pre_tasks:
- - name: Collect all routers
- command: >
- {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
- register: all_routers
- failed_when: false
- changed_when: false
-
- - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
- when: all_routers.rc == 0
-
- - set_fact: haproxy_routers=[]
- when: all_routers.rc != 0
-
- - name: Update router image to current version
- when: all_routers.rc == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
- --api-version=v1
- with_items: haproxy_routers
-
- - name: Check for default registry
- command: >
- {{ oc_cmd }} get -n default dc/docker-registry
- register: _default_registry
- failed_when: false
- changed_when: false
-
- - name: Update registry image to current version
- when: _default_registry.rc == 0
- command: >
- {{ oc_cmd }} patch dc/docker-registry -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
- --api-version=v1
-
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
deleted file mode 100644
index 85d7073f2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+++ /dev/null
@@ -1,88 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-- name: Load openshift_facts
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_facts
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed
- hosts: oo_first_master
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin, openshift-enterprise, and online
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ target_version }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
- tasks:
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
-
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
-
- - name: Determine available versions
- script: ../files/rpm_versions.sh {{ g_new_service_name }}
- register: g_versions_result
-
- - set_fact:
- g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
-
- - set_fact:
- g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
-
- - fail:
- msg: This playbook requires Origin 1.1 or later
- when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
-
- - fail:
- msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
- when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
-
- - fail:
- msg: Upgrade packages not found
- when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
-
- - set_fact:
- pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
- hosts: localhost
- connection: local
- become: no
- vars:
- pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
- tasks:
- - set_fact:
- pre_upgrade_completed: "{{ hostvars
- | oo_select_keys(pre_upgrade_hosts)
- | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
- - set_fact:
- pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
- when: pre_upgrade_failed | length > 0
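The pre-flight checks above lean on Ansible's version_compare filter, which compares dotted version strings segment by segment rather than lexically. A self-contained sketch of its behavior (hypothetical playbook, not part of the repo):

    - hosts: localhost
      connection: local
      gather_facts: no
      tasks:
        - debug:
            msg: "3.0.2 sorts before 3.1.1, so the upgrade gate would trip"
          when: "'3.0.2' | version_compare('3.1.1', '<')"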
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
deleted file mode 100644
index e5cfa58aa..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ /dev/null
@@ -1,140 +0,0 @@
----
-###############################################################################
-# The restart playbook should be run after this playbook completes.
-###############################################################################
-
-###############################################################################
-# Upgrade Masters
-###############################################################################
-- name: Upgrade master packages and configuration
- hosts: oo_masters_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- tasks:
- - name: Upgrade master packages
- command: "{{ ansible_pkg_mgr}} update-to -y {{ openshift.common.service_type }}-master{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Ensure python-yaml present for config upgrade
- action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
- when: not openshift.common.is_containerized | bool
-
-# Currently 3.1.1 does not have any new configuration settings
-#
-# - name: Upgrade master configuration
-# openshift_upgrade_config:
-# from_version: '3.0'
-# to_version: '3.1'
-# role: master
-# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
-- name: Set master update status to complete
- hosts: oo_masters_to_config
- tasks:
- - set_fact:
- master_update_complete: True
-
-##############################################################################
-# Gate on master update complete
-##############################################################################
-- name: Gate on master update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- master_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- - set_fact:
- master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
- when: master_update_failed | length > 0
-
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-- name: Upgrade nodes
- hosts: oo_nodes_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- roles:
- - openshift_facts
- tasks:
- - name: Upgrade node packages
- command: "{{ ansible_pkg_mgr }} update-to -y {{ openshift.common.service_type }}-node{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Restart node service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
-
- - set_fact:
- node_update_complete: True
-
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- node_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_nodes_to_config)
- | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
- - set_fact:
- node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
- when: node_update_failed | length > 0
-
-###############################################################################
-# Reconcile Cluster Roles and Cluster Role Bindings
-###############################################################################
-- name: Reconcile Cluster Roles and Cluster Role Bindings
- hosts: oo_masters_to_config
- vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
- ent_reconcile_bindings: true
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- tasks:
- - name: Reconcile Cluster Roles
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --additive-only=true --confirm
- run_once: true
-
- - name: Reconcile Cluster Role Bindings
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-role-bindings
- --exclude-groups=system:authenticated
- --exclude-groups=system:authenticated:oauth
- --exclude-groups=system:unauthenticated
- --exclude-users=system:anonymous
- --additive-only=true --confirm
- when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
- run_once: true
-
- - set_fact:
- reconcile_complete: True
-
-##############################################################################
-# Gate on reconcile
-##############################################################################
-- name: Gate on reconcile
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- reconcile_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- - set_fact:
- reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
- when: reconcile_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2
deleted file mode 120000
index cf20e8959..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_master/templates/atomic-openshift-master.j2 \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
deleted file mode 120000
index 5a3dd12b3..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_master/templates/docker \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
deleted file mode 120000
index 3ee319365..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_master/templates/docker-cluster \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
deleted file mode 100644
index c7b18f51b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: Check if Docker is installed
- command: rpm -q docker
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Upgrade Docker
- command: "{{ ansible_pkg_mgr}} update -y docker"
- when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
- register: docker_upgrade
-
-- name: Restart Docker
- command: systemctl restart docker
- when: docker_upgrade | changed
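The deleted docker_upgrade.yml restarts Docker inline whenever the update task reports a change. A sketch of the same logic expressed with a handler, which defers the restart until the end of the play (assumes these tasks live in a play or role that carries a handlers section):

    tasks:
      - name: Upgrade Docker
        command: "{{ ansible_pkg_mgr }} update -y docker"
        when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9', '<')
        register: docker_upgrade
        notify: Restart Docker

    handlers:
      - name: Restart Docker
        service:
          name: docker
          state: restarted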
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
deleted file mode 120000
index 53bed9684..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
+++ /dev/null
@@ -1 +0,0 @@
-../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
deleted file mode 120000
index f44f8eb4f..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_master/templates/native-cluster \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
deleted file mode 100644
index a911f12be..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-- name: Prepare for Node evacuation
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- name: Evacuate Node for Kubelet upgrade
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
-
-- include: containerized_upgrade.yml
- when: openshift.common.is_containerized | bool
-
-- name: Set node schedulability
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: openshift.node.schedulable | bool
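node_upgrade.yml implements the usual drain-upgrade-restore cycle: cordon the node, evacuate its pods, upgrade, then re-enable scheduling only if the node was schedulable to begin with. Including it from a play with serial: 1 keeps at most one node out of service at a time (sketch; the include path is assumed resolvable from the calling playbook):

    - name: Upgrade nodes one at a time
      hosts: oo_nodes_to_config
      serial: 1
      tasks:
        - include: node_upgrade.yml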
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
new file mode 120000
index 000000000..49a51bba9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
@@ -0,0 +1 @@
+../files/nuke_images.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
new file mode 100644
index 000000000..638ef23a8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -0,0 +1,40 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes'
+ yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType'
+ yaml_value: 'application/vnd.kubernetes.protobuf'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst'
+ yaml_value: 400
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.ops'
+ yaml_value: 200
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes'
+ yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType'
+ yaml_value: 'application/vnd.kubernetes.protobuf'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst'
+ yaml_value: 600
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.ops'
+ yaml_value: 300
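The eight modify_yaml tasks above differ only in key and value. A functionally equivalent sketch that loops over key/value pairs instead (assuming modify_yaml accepts the same arguments per item):

    - modify_yaml:
        dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
        yaml_key: "{{ item.key }}"
        yaml_value: "{{ item.value }}"
      with_items:
        - { key: 'masterClients.externalKubernetesClientConnectionOverrides.burst', value: 400 }
        - { key: 'masterClients.externalKubernetesClientConnectionOverrides.ops', value: 200 }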
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
new file mode 100644
index 000000000..1297938bc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
@@ -0,0 +1,21 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
+ yaml_key: 'masterClientConnectionOverrides.acceptContentTypes'
+ yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
+ yaml_key: 'masterClientConnectionOverrides.contentType'
+ yaml_value: 'application/vnd.kubernetes.protobuf'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
+ yaml_key: 'masterClientConnectionOverrides.burst'
+ yaml_value: 40
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
+ yaml_key: 'masterClientConnectionOverrides.ops'
+ yaml_value: 20
+
diff --git a/playbooks/common/openshift-cluster/verify_ansible_version.yml b/playbooks/common/openshift-cluster/verify_ansible_version.yml
new file mode 100644
index 000000000..2a143b065
--- /dev/null
+++ b/playbooks/common/openshift-cluster/verify_ansible_version.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Verify Ansible version is greater than or equal to 2.1.0.0
+ fail:
+ msg: "Unsupported ansible version: {{ ansible_version.full }} found"
+ when: not ansible_version.full | version_compare('2.1.0.0', 'ge')
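version_compare with the 'ge' operator makes this a fail-fast guard: any control host running Ansible older than 2.1.0.0 aborts before anything touches the cluster. Top-level playbooks pull it in as their first include, as the libvirt config.yml change further down does:

    - include: ../../common/openshift-cluster/verify_ansible_version.yml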
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index a95de8cf3..1b8106e0e 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,119 +1,10 @@
---
-- name: Set etcd facts needed for generating certs
+- name: Configure etcd
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: etcd
- local_facts:
- etcd_image: "{{ osm_etcd_image | default(None) }}"
- - name: Check status of etcd certificates
- stat:
- path: "{{ item }}"
- with_items:
- - /etc/etcd/server.crt
- - /etc/etcd/peer.crt
- - /etc/etcd/ca.crt
- register: g_etcd_server_cert_stat_result
- - set_fact:
- etcd_server_certs_missing: "{{ g_etcd_server_cert_stat_result.results | oo_collect(attribute='stat.exists')
- | list | intersect([false])}}"
- etcd_cert_subdir: etcd-{{ openshift.common.hostname }}
- etcd_cert_config_dir: /etc/etcd
- etcd_cert_prefix:
- etcd_hostname: "{{ openshift.common.hostname }}"
- etcd_ip: "{{ openshift.common.ip }}"
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_etcd_mktemp
- changed_when: False
-
-- name: Configure etcd certificates
- hosts: oo_first_etcd
- vars:
- etcd_generated_certs_dir: /etc/etcd/generated_certs
- etcd_needing_server_certs: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'])
- | oo_filter_list(filter_attr='etcd_server_certs_missing') }}"
- sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
- roles:
- - openshift_etcd_certificates
- post_tasks:
- - name: Create a tarball of the etcd certs
- command: >
- tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
- -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
- args:
- creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: "{{ etcd_needing_server_certs | default([]) }}"
- - name: Retrieve the etcd cert tarballs
- fetch:
- src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: "{{ etcd_needing_server_certs | default([]) }}"
-
-# Configure a first etcd host to avoid conflicts in choosing a leader
-# if other members come online too quickly.
-- name: Configure first etcd host
- hosts: oo_first_etcd
- vars:
- sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
- etcd_url_scheme: https
- etcd_peer_url_scheme: https
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- pre_tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ etcd_cert_config_dir }}"
- state: directory
- - name: Unarchive the tarball on the etcd host
- unarchive:
- src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ etcd_cert_config_dir }}"
- when: etcd_server_certs_missing
- roles:
- - openshift_etcd
- - nickhammond.logrotate
-
-# Configure the remaining etcd hosts, skipping the first one we dealt with above.
-- name: Configure remaining etcd hosts
- hosts: oo_etcd_to_config:!oo_first_etcd
- vars:
- sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
- etcd_url_scheme: https
- etcd_peer_url_scheme: https
+ - role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- pre_tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ etcd_cert_config_dir }}"
- state: directory
- - name: Unarchive the tarball on the etcd host
- unarchive:
- src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ etcd_cert_config_dir }}"
- when: etcd_server_certs_missing
- roles:
- - openshift_etcd
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- role: nickhammond.logrotate
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file: name={{ g_etcd_mktemp.stdout }} state=absent
- changed_when: False
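The rewritten play folds fact gathering, certificate generation, certificate distribution, and first-host ordering into the openshift_etcd role. Note that the first host in oo_etcd_to_config is promoted to CA host, so group ordering matters; a quick way to confirm which host that is (hypothetical debug task):

    - hosts: localhost
      connection: local
      gather_facts: no
      tasks:
        - debug:
            msg: "etcd CA host: {{ groups.oo_etcd_to_config.0 }}"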
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 7a59f3ea3..7f60cd9e4 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,5 +1,5 @@
---
-- name: Set master facts and determine if external etcd certs need to be generated
+- name: Set master facts
hosts: oo_masters_to_config
vars:
t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
@@ -48,6 +48,12 @@
- set_fact:
openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"
when: openshift_hosted_metrics_resolution is not defined
+ - set_fact:
+ openshift_hosted_metrics_deployer_prefix: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_prefix') | default('openshift') }}"
+ when: openshift_hosted_metrics_deployer_prefix is not defined
+ - set_fact:
+ openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}"
+    when: openshift_hosted_metrics_deployer_version is not defined
roles:
- openshift_facts
post_tasks:
@@ -73,23 +79,6 @@
openshift_env:
openshift_hosted_registry_storage_kind: 'nfs'
when: openshift_hosted_registry_storage_kind is not defined and groups.oo_nfs_to_config is defined and groups.oo_nfs_to_config | length > 0
-    - name: Check status of external etcd certificates
- stat:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- with_items:
- - master.etcd-client.crt
- - master.etcd-ca.crt
- register: g_external_etcd_cert_stat_result
- - set_fact:
- etcd_client_certs_missing: "{{ g_external_etcd_cert_stat_result.results
- | oo_collect(attribute='stat.exists')
- | list | intersect([false])}}"
- etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: master.etcd-
- etcd_hostname: "{{ openshift.common.hostname }}"
- etcd_ip: "{{ openshift.common.ip }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- name: Create temp directory for syncing certs
hosts: localhost
@@ -102,139 +91,6 @@
register: g_master_mktemp
changed_when: False
-- name: Configure etcd certificates
- hosts: oo_first_etcd
- vars:
- etcd_generated_certs_dir: /etc/etcd/generated_certs
- etcd_needing_client_certs: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | default([])
- | oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- roles:
- - openshift_etcd_certificates
- post_tasks:
- - name: Create a tarball of the etcd certs
- command: >
- tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
- -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
- args:
- creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: "{{ etcd_needing_client_certs | default([]) }}"
- - name: Retrieve the etcd cert tarballs
- fetch:
- src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: "{{ etcd_needing_client_certs | default([]) }}"
-
-- name: Copy the external etcd certs to the masters
- hosts: oo_masters_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ openshift.common.config_base }}/master"
- state: directory
- when: etcd_client_certs_missing is defined and etcd_client_certs_missing
- - name: Unarchive the tarball on the master
- unarchive:
- src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ etcd_cert_config_dir }}"
- when: etcd_client_certs_missing is defined and etcd_client_certs_missing
- - file:
- path: "{{ etcd_cert_config_dir }}/{{ item }}"
- owner: root
- group: root
- mode: 0600
- with_items:
- - master.etcd-client.crt
- - master.etcd-client.key
- - master.etcd-ca.crt
- when: etcd_client_certs_missing is defined and etcd_client_certs_missing
-
-- name: Determine if master certificates need to be generated
- hosts: oo_first_master:oo_masters_to_config
- tasks:
- - set_fact:
- openshift_master_certs_no_etcd:
- - admin.crt
- - master.kubelet-client.crt
- - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
- - master.server.crt
- - openshift-master.crt
- - openshift-registry.crt
- - openshift-router.crt
- - etcd.server.crt
- openshift_master_certs_etcd:
- - master.etcd-client.crt
-
- - set_fact:
- openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
-
- - name: Check status of master certificates
- stat:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- with_items: "{{ openshift_master_certs }}"
- register: g_master_cert_stat_result
- - set_fact:
- master_certs_missing: "{{ False in (g_master_cert_stat_result.results
- | oo_collect(attribute='stat.exists')
- | list ) }}"
- master_cert_subdir: master-{{ openshift.common.hostname }}
- master_cert_config_dir: "{{ openshift.common.config_base }}/master"
- - set_fact:
- openshift_infra_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_nodes_with_label('region', 'infra')
- | oo_collect('inventory_hostname') }}"
- when: openshift_infra_nodes is not defined and groups.oo_nodes_to_config | default([]) | length > 0
-
-- name: Configure master certificates
- hosts: oo_first_master
- vars:
- master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
- masters_needing_certs: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
- | oo_filter_list(filter_attr='master_certs_missing') }}"
- master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- roles:
- - openshift_master_certificates
- post_tasks:
- - name: Remove generated etcd client certs when using external etcd
- file:
- path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
- state: absent
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- with_nested:
- - "{{ masters_needing_certs | default([]) }}"
- - - master.etcd-client.crt
- - master.etcd-client.key
-
- - name: Create a tarball of the master certs
- command: >
- tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
- -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
- args:
- creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- with_items: "{{ masters_needing_certs | default([]) }}"
-
- - name: Retrieve the master cert tarball from the master
- fetch:
- src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: "{{ masters_needing_certs | default([]) }}"
-
- name: Check for cached session secrets
hosts: oo_first_master
roles:
@@ -249,7 +105,7 @@
- name: Generate master session secrets
hosts: oo_first_master
vars:
- g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([]) and openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
+ g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
roles:
@@ -262,85 +118,66 @@
session_encryption_secrets: "{{ g_session_encryption_secrets }}"
when: not g_session_secrets_present | bool
-- name: Parse named certificates
- hosts: localhost
- connection: local
- become: no
- vars:
- internal_hostnames: "{{ hostvars[groups.oo_first_master.0].openshift.common.internal_hostnames }}"
- named_certificates: "{{ hostvars[groups.oo_first_master.0].openshift_master_named_certificates | default([]) }}"
- named_certificates_dir: "{{ hostvars[groups.oo_first_master.0].master_cert_config_dir }}/named_certificates/"
- tasks:
- - set_fact:
- parsed_named_certificates: "{{ named_certificates | oo_parse_named_certificates(named_certificates_dir, internal_hostnames) }}"
- when: named_certificates | length > 0
-
-- name: Deploy named certificates
- hosts: oo_masters_to_config
- vars:
- named_certs_dir: "{{ master_cert_config_dir }}/named_certificates/"
- named_certs_specified: "{{ openshift_master_named_certificates is defined }}"
- overwrite_named_certs: "{{ openshift_master_overwrite_named_certificates | default(false) }}"
- roles:
- - role: openshift_facts
- post_tasks:
- - openshift_facts:
- role: master
- local_facts:
- named_certificates: "{{ hostvars.localhost.parsed_named_certificates | default([]) }}"
- additive_facts_to_overwrite:
- - "{{ 'master.named_certificates' if overwrite_named_certs | bool else omit }}"
- - name: Clear named certificates
- file:
- path: "{{ named_certs_dir }}"
- state: absent
- when: overwrite_named_certs | bool
- - name: Ensure named certificate directory exists
- file:
- path: "{{ named_certs_dir }}"
- state: directory
- mode: 0700
- when: named_certs_specified | bool
- - name: Land named certificates
- copy: src="{{ item.certfile }}" dest="{{ named_certs_dir }}"
- with_items: openshift_master_named_certificates
- when: named_certs_specified | bool
- - name: Land named certificate keys
- copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}" mode=0600
- with_items: openshift_master_named_certificates
- when: named_certs_specified | bool
-
-- name: Configure master instances
+- name: Configure masters
hosts: oo_masters_to_config
any_errors_fatal: true
- serial: 1
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ openshift.master.ha }}"
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
| union(groups['oo_masters_to_config'])
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
- pre_tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ openshift.common.config_base }}/master"
- state: directory
- when: master_certs_missing | bool and 'oo_first_master' not in group_names
- - name: Unarchive the tarball on the master
- unarchive:
- src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
- dest: "{{ master_cert_config_dir }}"
- when: master_certs_missing | bool and 'oo_first_master' not in group_names
roles:
- - openshift_master
+ - role: openshift_master_facts
+ - role: openshift_hosted_facts
+ - role: openshift_master_certificates
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_hostnames: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'] | default([]))
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
+ - role: openshift_etcd_client_certificates
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+ - role: openshift_clock
+ - role: openshift_cloud_provider
+ - role: openshift_builddefaults
+ - role: os_firewall
+ os_firewall_allow:
+ - service: etcd embedded
+ port: 4001/tcp
+ - service: api server https
+ port: "{{ openshift.master.api_port }}/tcp"
+ - service: api controllers https
+ port: "{{ openshift.master.controllers_port }}/tcp"
+ - service: skydns tcp
+ port: "{{ openshift.master.dns_port }}/tcp"
+ - service: skydns udp
+ port: "{{ openshift.master.dns_port }}/udp"
+ - service: Fluentd td-agent tcp
+ port: 24224/tcp
+ - service: Fluentd td-agent udp
+ port: 24224/udp
+ - service: pcsd
+ port: 2224/tcp
+ - service: Corosync UDP
+ port: 5404/udp
+ - service: Corosync UDP
+ port: 5405/udp
+ - role: openshift_master
+ openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- role: nickhammond.logrotate
- role: nuage_master
when: openshift.common.use_nuage | bool
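Two patterns in the new roles list are worth noting: variables indented under a role entry are scoped to that single role invocation, and a when: on a role entry (as on openshift_etcd_client_certificates above) is applied to every task inside the role rather than skipping the import itself. A minimal sketch of the pattern, with a hypothetical role name:

    roles:
      - role: my_etcd_consumer            # hypothetical role name
        etcd_cert_prefix: "master.etcd-"  # visible only to this role invocation
        when: groups.oo_etcd_to_config | default([]) | length > 0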
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 6e6cb3e01..7304fca56 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -33,7 +33,12 @@
service: name={{ openshift.common.service_type }}-master-controllers state=restarted
- name: verify api server
command: >
- curl --silent --cacert {{ openshift.common.config_base }}/master/ca.crt
+ curl --silent
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
{{ openshift.master.api_url }}/healthz/ready
register: api_available_output
until: api_available_output.stdout == 'ok'
@@ -53,4 +58,6 @@
- include: ../openshift-master/config.yml
+- include: ../openshift-loadbalancer/config.yml
+
- include: ../openshift-node/config.yml
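On a 3.2/1.2 or newer master the templated health check renders down to a single curl against the CA bundle; with the default config base of /etc/origin it would look like this (illustrative hostname):

    curl --silent --cacert /etc/origin/master/ca-bundle.crt https://master.example.com:8443/healthz/ready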
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 80659dc52..e7c7ffb38 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -19,23 +19,6 @@
labels: "{{ openshift_node_labels | default(None) }}"
annotations: "{{ openshift_node_annotations | default(None) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- - name: Check status of node certificates
- stat:
- path: "{{ openshift.common.config_base }}/node/{{ item }}"
- with_items:
- - "system:node:{{ openshift.common.hostname }}.crt"
- - "system:node:{{ openshift.common.hostname }}.key"
- - "system:node:{{ openshift.common.hostname }}.kubeconfig"
- - ca.crt
- - server.key
- - server.crt
- register: stat_result
- - set_fact:
- certs_missing: "{{ stat_result.results | oo_collect(attribute='stat.exists')
- | list | intersect([false])}}"
- node_subdir: node-{{ openshift.common.hostname }}
- config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
- node_cert_dir: "{{ openshift.common.config_base }}/node"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -48,53 +31,6 @@
register: mktemp
changed_when: False
-- name: Create node certificates
- hosts: oo_first_master
- vars:
- nodes_needing_certs: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']
- | default([]))
- | oo_filter_list(filter_attr='certs_missing') }}"
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- roles:
- - openshift_node_certificates
- post_tasks:
- - name: Create a tarball of the node config directories
- command: >
- tar -czvf {{ item.config_dir }}.tgz
- --transform 's|system:{{ item.node_subdir }}|node|'
- -C {{ item.config_dir }} .
- args:
- creates: "{{ item.config_dir }}.tgz"
- with_items: "{{ nodes_needing_certs | default([]) }}"
-
- - name: Retrieve the node config tarballs from the master
- fetch:
- src: "{{ item.config_dir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: "{{ nodes_needing_certs | default([]) }}"
-
-- name: Deploy node certificates
- hosts: oo_nodes_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ node_cert_dir }}"
- state: directory
- # TODO: notify restart node
- # possibly test service started time against certificate/config file
- # timestamps in node to trigger notify
- - name: Unarchive the tarball on the node
- unarchive:
- src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
- dest: "{{ node_cert_dir }}"
- when: certs_missing
-
- name: Evaluate node groups
hosts: localhost
become: no
@@ -107,7 +43,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+ when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
- name: Configure node instances
hosts: oo_containerized_master_nodes
@@ -124,7 +60,30 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - openshift_node
+ - role: openshift_clock
+ - role: openshift_docker
+ - role: openshift_node_certificates
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ - role: openshift_cloud_provider
+ - role: openshift_common
+ - role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq
+ - role: os_firewall
+ os_firewall_allow:
+ - service: Kubernetes kubelet
+ port: 10250/tcp
+ - service: http
+ port: 80/tcp
+ - service: https
+ port: 443/tcp
+      - service: OpenShift kubelet ReadOnlyPort
+        port: 10255/tcp
+      - service: OpenShift kubelet ReadOnlyPort udp
+        port: 10255/udp
+ - service: OpenShift OVS sdn
+ port: 4789/udp
+ when: openshift.node.use_openshift_sdn | bool
+ - role: openshift_node
- name: Configure node instances
hosts: oo_nodes_to_config:!oo_containerized_master_nodes
@@ -140,96 +99,42 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - openshift_node
-
-- name: Gather and set facts for flannel certificates
- hosts: oo_nodes_to_config
- tasks:
- - name: Check status of flannel external etcd certificates
- stat:
- path: "{{ openshift.common.config_base }}/node/{{ item }}"
- with_items:
- - node.etcd-client.crt
- - node.etcd-ca.crt
- register: g_external_etcd_flannel_cert_stat_result
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- - set_fact:
- etcd_client_flannel_certs_missing: "{{ False in g_external_etcd_flannel_cert_stat_result.results
- | oo_collect(attribute='stat.exists')
- | list }}"
- etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- etcd_cert_prefix: node.etcd-
- etcd_hostname: "{{ openshift.common.hostname }}"
- etcd_ip: "{{ openshift.common.ip }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 and (openshift.common.use_flannel | bool)
-
-- name: Configure flannel etcd certificates
- hosts: oo_first_etcd
- vars:
- etcd_generated_certs_dir: /etc/etcd/generated_certs
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- pre_tasks:
- - set_fact:
- etcd_needing_client_certs: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_filter_list('etcd_client_flannel_certs_missing') | default([]) }}"
- roles:
- - role: openshift_etcd_certificates
- when: openshift_use_flannel | default(false) | bool
- post_tasks:
- - name: Create a tarball of the etcd flannel certs
- command: >
- tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
- -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
- args:
- creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: "{{ etcd_needing_client_certs | default([]) }}"
- - name: Retrieve the etcd cert tarballs
- fetch:
- src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: "{{ etcd_needing_client_certs | default([]) }}"
-
-- name: Copy the external etcd flannel certs to the nodes
- hosts: oo_nodes_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ openshift.common.config_base }}/node"
- state: directory
- when: etcd_client_flannel_certs_missing | default(false) | bool
- - name: Unarchive the tarball on the master
- unarchive:
- src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ etcd_cert_config_dir }}"
- when: etcd_client_flannel_certs_missing | default(false) | bool
- - file:
- path: "{{ etcd_cert_config_dir }}/{{ item }}"
- owner: root
- group: root
- mode: 0600
- with_items:
- - node.etcd-client.crt
- - node.etcd-client.key
- - node.etcd-ca.crt
- when: etcd_client_flannel_certs_missing | default(false) | bool
-
+ - role: openshift_clock
+ - role: openshift_docker
+ - role: openshift_node_certificates
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ - role: openshift_cloud_provider
+ - role: openshift_common
+ - role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq
+ - role: os_firewall
+ os_firewall_allow:
+ - service: Kubernetes kubelet
+ port: 10250/tcp
+ - service: http
+ port: 80/tcp
+ - service: https
+ port: 443/tcp
+      - service: OpenShift kubelet ReadOnlyPort
+        port: 10255/tcp
+      - service: OpenShift kubelet ReadOnlyPort udp
+        port: 10255/udp
+ - service: OpenShift OVS sdn
+ port: 4789/udp
+ when: openshift.node.use_openshift_sdn | bool
+ - role: openshift_node
- name: Additional node config
hosts: oo_nodes_to_config
vars:
- # TODO: Prefix flannel role variables.
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
roles:
- role: flannel
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
when: openshift.common.use_flannel | bool
- role: nuage_node
when: openshift.common.use_nuage | bool
@@ -263,7 +168,12 @@
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
command: >
- curl --silent --cacert {{ openshift.common.config_base }}/master/ca.crt
+ curl --silent
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
{{ openshift.master.api_url }}/healthz/ready
register: api_available_output
until: api_available_output.stdout == 'ok'
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index b973c513f..8e46c5919 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -9,7 +9,7 @@
groups: l_oo_all_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
@@ -26,9 +26,8 @@
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
- openshift_registry_selector: 'type=infra'
+ openshift_hosted_registry_selector: 'type=infra'
openshift_hosted_router_selector: 'type=infra'
- openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 032d4cf68..299325fc4 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -2,6 +2,9 @@
# TODO: need to figure out a plan for setting hostname, currently the default
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
+
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
gather_facts: no
tasks:
@@ -10,7 +13,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
@@ -26,9 +29,8 @@
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
- openshift_registry_selector: 'type=infra'
+ openshift_hosted_registry_selector: 'type=infra'
openshift_hosted_router_selector: 'type=infra'
- openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
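
Editor's note: like the GCE and OpenStack configs, this playbook now runs verify_ansible_version.yml before anything else. That file is not part of this diff; a version gate of the same shape can be sketched with assert (the 2.1.0 minimum below is an assumption for illustration, not the repository's actual requirement):

---
# Hedged sketch of an Ansible version gate; the minimum version is
# assumed, not taken from verify_ansible_version.yml.
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
  - name: Verify Ansible meets the minimum version
    assert:
      that: "ansible_version.full | version_compare('2.1.0', '>=')"
      msg: "This playbook requires Ansible 2.1.0 or later"
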
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
index 3117d9edc..b42ca83af 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -1,27 +1,11 @@
---
-- name: Test if libvirt network for openshift already exists
- command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
- register: net_info_result
- changed_when: False
- failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr"
-
-- name: Create a temp directory for the template xml file
- command: "mktemp -d /tmp/openshift-ansible-XXXXXXX"
- register: mktemp
- when: net_info_result.rc == 1
-
-- name: Create network xml file
- template:
- src: templates/network.xml
- dest: "{{ mktemp.stdout }}/network.xml"
- when: net_info_result.rc == 1
-
-- name: Create libvirt network for openshift
- command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
- when: net_info_result.rc == 1
-
-- name: Remove the temp directory
- file:
- path: "{{ mktemp.stdout }}"
- state: absent
- when: net_info_result.rc == 1
+- name: Create the libvirt network for OpenShift
+ virt_net:
+ name: '{{ libvirt_network }}'
+ state: '{{ item }}'
+ autostart: 'yes'
+ xml: "{{ lookup('template', 'network.xml') }}"
+ uri: '{{ libvirt_uri }}'
+ with_items:
+ - present
+ - active
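
Editor's note: the rewrite above replaces four virsh shell-outs (plus a temp-directory dance) with the virt_net module. Looping state over present and then active is idempotent: present defines the network from the template only if it is missing, active starts it, and re-runs are no-ops; autostart additionally survives host reboots. The module can also be used read-only to verify the result, as in this sketch (requires the libvirt Python bindings on the controller):

---
# Read-only verification sketch; the uri assumes the system libvirt socket.
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
  - name: List defined libvirt networks
    virt_net:
      command: list_nets
      uri: qemu:///system
    register: nets
  - debug:
      var: nets.list_nets
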
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
index 397158b9e..8685624ec 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -6,22 +6,25 @@
# We need to set permissions on the directory and on any items created under it, so we call the acl module both with and without default set.
- acl:
- default: "{{ item }}"
+ default: '{{ item.default }}'
entity: kvm
etype: group
name: "{{ libvirt_storage_pool_path }}"
- permissions: rwx
+ permissions: '{{ item.permissions }}'
state: present
with_items:
- - no
- - yes
+ - default: no
+ permissions: x
+ - default: yes
+ permissions: rwx
-- name: Test if libvirt storage pool for openshift already exists
- command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
- register: pool_info_result
- changed_when: False
- failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr"
-
-- name: Create the libvirt storage pool for openshift
- command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
- when: pool_info_result.rc == 1
+- name: Create the libvirt storage pool for OpenShift
+ virt_pool:
+ name: '{{ libvirt_storage_pool }}'
+ state: '{{ item }}'
+ autostart: 'yes'
+ xml: "{{ lookup('template', 'storage-pool.xml') }}"
+ uri: '{{ libvirt_uri }}'
+ with_items:
+ - present
+ - active
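
Editor's note: two changes above. The kvm group ACL is split so the pool directory itself grants only x (enough to traverse into it) while the default ACL grants rwx to anything created inside; and the virsh pool-info/pool-create-as pair gives way to virt_pool driven by the new storage-pool.xml template, mirroring the network task. The ACL half in isolation, against an assumed path:

---
# Hedged sketch of the two-pass ACL; the path is an illustrative
# assumption. The access ACL only needs x so group kvm can traverse the
# directory; the default ACL makes new volumes inside it group-usable.
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
  - acl:
      name: /var/lib/libvirt/openshift-images
      entity: kvm
      etype: group
      permissions: "{{ item.permissions }}"
      default: "{{ item.default }}"
      state: present
    with_items:
    - { default: no, permissions: x }
    - { default: yes, permissions: rwx }
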
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 833586ffa..e0afc43ba 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -39,14 +39,14 @@
file:
dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
state: directory
- with_items: instances
+ with_items: '{{ instances }}'
- name: Create the cloud-init config drive files
template:
src: '{{ item[1] }}'
dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
with_nested:
- - instances
+ - '{{ instances }}'
- [ user-data, meta-data ]
- name: Create the cloud-init config drive
@@ -54,18 +54,18 @@
args:
chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Refresh the libvirt storage pool for openshift
command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
- name: Create VM drives
command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Create VM docker drives
command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Create VMs
virt:
@@ -73,14 +73,14 @@
command: define
xml: "{{ lookup('template', '../templates/domain.xml') }}"
uri: '{{ libvirt_uri }}'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Start VMs
virt:
name: '{{ item }}'
state: running
uri: '{{ libvirt_uri }}'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Wait for the VMs to get an IP
shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
@@ -93,7 +93,7 @@
- name: Collect IP addresses of the VMs
shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
register: scratch_ip
- with_items: instances
+ with_items: '{{ instances }}'
- set_fact:
ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"
@@ -117,14 +117,14 @@
groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
openshift_node_labels: "{{ node_label }}"
with_together:
- - instances
- - ips
+ - '{{ instances }}'
+ - '{{ ips }}'
- name: Wait for ssh
wait_for:
host: '{{ item }}'
port: 22
- with_items: ips
+ with_items: '{{ ips }}'
- name: Wait for openshift user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
@@ -133,5 +133,5 @@
retries: 30
delay: 1
with_together:
- - instances
- - ips
+ - '{{ instances }}'
+ - '{{ ips }}'
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index 8e96cec8d..88504a5f6 100644
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -19,6 +19,9 @@
<apic/>
<pae/>
</features>
+ <cpu mode='host-model'>
+ <model fallback='allow'/>
+ </cpu>
<clock offset='utc'>
<timer name='rtc' tickpolicy='catchup'/>
<timer name='pit' tickpolicy='delay'/>
@@ -30,22 +33,22 @@
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
- <driver name='qemu' type='qcow2'/>
+ <driver name='qemu' type='qcow2' discard='unmap'/>
<source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
- <target dev='vda' bus='virtio'/>
+ <target dev='sda' bus='scsi'/>
</disk>
<disk type='file' device='disk'>
- <driver name='qemu' type='qcow2'/>
+ <driver name='qemu' type='qcow2' discard='unmap'/>
<source file='{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/>
- <target dev='vdb' bus='virtio'/>
+ <target dev='sdb' bus='scsi'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
- <target dev='vdc' bus='virtio'/>
+ <target dev='sdc' bus='scsi'/>
<readonly/>
</disk>
- <controller type='usb' index='0' />
+ <controller type='scsi' model='virtio-scsi' />
<interface type='network'>
<source network='{{ libvirt_network }}'/>
<model type='virtio'/>
@@ -56,17 +59,6 @@
<console type='pty'>
<target type='serial' port='0'/>
</console>
- <channel type='spicevmc'>
- <target type='virtio' name='com.redhat.spice.0'/>
- </channel>
- <input type='tablet' bus='usb' />
- <input type='mouse' bus='ps2'/>
- <graphics type='spice' autoport='yes' />
- <video>
- <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>
- </video>
- <redirdev bus='usb' type='spicevmc'>
- </redirdev>
<memballoon model='virtio'>
</memballoon>
</devices>
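
Editor's note: moving the disks from virtio-blk (vdX) to a virtio-scsi controller (sdX) is what makes discard='unmap' effective: SCSI UNMAP from the guest punches holes in the backing qcow2 files, so deleted blocks are returned to the host. The SPICE graphics, USB, and tablet devices are dropped because these VMs are headless; the serial console remains. A hedged verification sketch, not part of the diff:

---
# Inside a guest, disks that honor discard report a non-zero granularity.
- hosts: all
  gather_facts: no
  tasks:
  - name: Show per-disk discard support
    command: lsblk --discard --output NAME,DISC-GRAN,DISC-MAX
    register: discard_info
    changed_when: false
  - debug:
      var: discard_info.stdout_lines
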
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
index 050bc7ab9..0ce2a8342 100644
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -1,5 +1,5 @@
<network>
- <name>openshift-ansible</name>
+ <name>{{ libvirt_network }}</name>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
diff --git a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
new file mode 100644
index 000000000..da139afd0
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
@@ -0,0 +1,6 @@
+<pool type='dir'>
+ <name>{{ libvirt_storage_pool }}</name>
+ <target>
+ <path>{{ libvirt_storage_pool_path }}</path>
+ </target>
+</pool>
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index 8b79940f4..fbcf7c886 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -5,7 +5,7 @@ hostname: {{ item[0] }}
fqdn: {{ item[0] }}.example.com
mounts:
-- [ vdb ]
+- [ sdb ]
users:
- default
@@ -26,12 +26,18 @@ write_files:
permissions: 440
content: |
Defaults:openshift !requiretty
- - content: |
- DEVS=/dev/vdb
- VG=docker_vg
- path: /etc/sysconfig/docker-storage-setup
+ - path: /etc/sysconfig/docker-storage-setup
owner: root:root
permissions: '0644'
+ content: |
+ DEVS=/dev/sdb
+ VG=docker_vg
+ EXTRA_DOCKER_STORAGE_OPTIONS='--storage-opt dm.blkdiscard=true'
+ - path: /etc/systemd/system/fstrim.timer.d/hourly.conf
+ content: |
+ [Timer]
+ OnCalendar=hourly
runcmd:
- NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
+ - systemctl enable --now fstrim.timer
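
Editor's note: the cloud-init changes follow through on the discard work. docker-storage-setup now targets /dev/sdb (the disk's new SCSI name), dm.blkdiscard makes the thin pool discard blocks freed by removed containers and images, and a systemd drop-in arms fstrim.timer hourly so filesystem-level free space is trimmed back into the sparse qcow2 files. A hedged post-provisioning check using standard systemd and util-linux tooling:

---
# Verification sketch for a provisioned guest; not part of the diff.
- hosts: all
  gather_facts: no
  become: yes
  tasks:
  - name: Show when fstrim.timer last fired and when it fires next
    command: systemctl list-timers fstrim.timer --no-pager
    register: timer_status
    changed_when: false
  - name: Trim all mounted filesystems once, verbosely
    command: fstrim --all --verbose
    changed_when: false
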
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index baef911f9..df5c52f2d 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -15,7 +15,7 @@
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: groups[cluster_group] | default([])
+ with_items: '{{ groups[cluster_group] | default([]) }}'
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
@@ -42,30 +42,30 @@
command: '{{ item[1] }}'
uri: '{{ libvirt_uri }}'
with_nested:
- - groups['oo_hosts_to_terminate']
+ - "{{ groups['oo_hosts_to_terminate'] }}"
- [ destroy, undefine ]
- name: Delete VM drives
command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
args:
removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
- with_items: groups['oo_hosts_to_terminate']
+ with_items: "{{ groups['oo_hosts_to_terminate'] }}"
- name: Delete VM docker drives
command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2'
args:
removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'
- with_items: groups['oo_hosts_to_terminate']
+ with_items: "{{ groups['oo_hosts_to_terminate'] }}"
- name: Delete the VM cloud-init image
file:
path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
state: absent
- with_items: groups['oo_hosts_to_terminate']
+ with_items: "{{ groups['oo_hosts_to_terminate'] }}"
- name: Remove the cloud-init config directory
file:
path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
state: absent
- with_items: groups['oo_hosts_to_terminate']
+ with_items: "{{ groups['oo_hosts_to_terminate'] }}"
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 28362c984..a152135fc 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -7,7 +7,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: '{{ g_all_hosts }}'
- hosts: l_oo_all_hosts
gather_facts: no
@@ -30,7 +30,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: g_all_hosts | default([])
+ with_items: '{{ g_all_hosts | default([]) }}'
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 6e4f414d6..f6550b2c4 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,4 +1,6 @@
---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
gather_facts: no
tasks:
@@ -7,7 +9,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
@@ -23,9 +25,8 @@
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
- openshift_registry_selector: 'type=infra'
+ openshift_hosted_registry_selector: 'type=infra'
openshift_hosted_router_selector: 'type=infra'
- openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
diff --git a/playbooks/openstack/openshift-cluster/dns.yml b/playbooks/openstack/openshift-cluster/dns.yml
index 31113d5f0..446a1846f 100644
--- a/playbooks/openstack/openshift-cluster/dns.yml
+++ b/playbooks/openstack/openshift-cluster/dns.yml
@@ -35,6 +35,11 @@
- vars.yml
- cluster_hosts.yml
roles:
+ # Explicitly calling openshift_facts because it appears that when
+ # rhel_subscribe is skipped, the openshift_facts dependency for
+ # openshift_repos is also skipped (this is the case at least for Ansible
+ # 2.0.2)
+ - openshift_facts
- role: rhel_subscribe
when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
ansible_distribution == "RedHat" and
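
Editor's note: the pitfall the new comment describes can be reduced to this shape (role names below are hypothetical): when a role is skipped by its when: condition, Ansible 2.0.2 also skips the roles it depends on through meta/main.yml, so a dependency other plays rely on never runs. Listing the dependency explicitly, as the play above now does with openshift_facts, sidesteps the problem:

---
# Hedged sketch; role_a and role_b are made-up names. role_b depends on
# role_a via meta/main.yml; listing role_a explicitly means it still runs
# even when role_b's condition is false.
- hosts: all
  roles:
  - role_a
  - role: role_b
    when: some_condition | default(false) | bool
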
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 422e6dafe..458cf5ac7 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -42,6 +42,12 @@ parameters:
description: Source of legitimate ssh connections
default: 0.0.0.0/0
+ node_port_incoming:
+ type: string
+ label: Source of node port connections
+ description: Authorized sources targeting node ports
+ default: 0.0.0.0/0
+
num_etcd:
type: number
label: Number of etcd nodes
@@ -280,6 +286,10 @@ resources:
port_range_max: 8443
- direction: ingress
protocol: tcp
+ port_range_min: 8444
+ port_range_max: 8444
+ - direction: ingress
+ protocol: tcp
port_range_min: 53
port_range_max: 53
- direction: ingress
@@ -302,6 +312,22 @@ resources:
protocol: udp
port_range_min: 24224
port_range_max: 24224
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2224
+ port_range_max: 2224
+ - direction: ingress
+ protocol: udp
+ port_range_min: 5404
+ port_range_max: 5404
+ - direction: ingress
+ protocol: udp
+ port_range_min: 5405
+ port_range_max: 5405
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 9090
+ port_range_max: 9090
etcd-secgrp:
type: OS::Neutron::SecurityGroup
@@ -359,10 +385,25 @@ resources:
port_range_max: 10250
remote_mode: remote_group_id
- direction: ingress
+ protocol: tcp
+ port_range_min: 10255
+ port_range_max: 10255
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: udp
+ port_range_min: 10255
+ port_range_max: 10255
+ remote_mode: remote_group_id
+ - direction: ingress
protocol: udp
port_range_min: 4789
port_range_max: 4789
remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 30000
+ port_range_max: 32767
+ remote_ip_prefix: { get_param: node_port_incoming }
infra-secgrp:
type: OS::Neutron::SecurityGroup
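
Editor's note: the node security group now opens the Kubernetes NodePort range (30000-32767/tcp) but gates it behind the new node_port_incoming parameter rather than hard-coding 0.0.0.0/0; the master group additionally opens 8444/tcp plus 2224/tcp, 5404-5405/udp, and 9090/tcp, which match the conventional pcsd/corosync and Cockpit ports. A hedged usage sketch (CIDR and stack name are illustrative):

---
# Creating the stack with NodePorts reachable from a single CIDR.
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
  - name: Create the stack with a restricted NodePort source
    command: >
      heat stack-create
      -f files/heat_stack.yaml
      -P node_port_incoming=192.168.10.0/24
      openshift-ansible-demo-stack
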
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index b9aae2f4c..5cf543204 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -33,6 +33,7 @@
-P external_net={{ openstack_network_external_net }}
-P ssh_public_key="{{ openstack_ssh_public_key }}"
-P ssh_incoming={{ openstack_ssh_access_from }}
+ -P node_port_incoming={{ openstack_node_port_access_from }}
-P num_etcd={{ num_etcd }}
-P num_masters={{ num_masters }}
-P num_nodes={{ num_nodes }}
@@ -48,6 +49,8 @@
-P infra_flavor={{ openstack_flavor["infra"] }}
-P dns_flavor={{ openstack_flavor["dns"] }}
openshift-ansible-{{ cluster_id }}-stack'
+ args:
+ chdir: '{{ playbook_dir }}'
- name: Wait for OpenStack Stack readiness
shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
@@ -107,9 +110,9 @@
openshift_node_labels:
type: "etcd"
with_together:
- - parsed_outputs.etcd_names
- - parsed_outputs.etcd_ips
- - parsed_outputs.etcd_floating_ips
+ - '{{ parsed_outputs.etcd_names }}'
+ - '{{ parsed_outputs.etcd_ips }}'
+ - '{{ parsed_outputs.etcd_floating_ips }}'
- name: Add new master instances groups and variables
add_host:
@@ -121,9 +124,9 @@
openshift_node_labels:
type: "master"
with_together:
- - parsed_outputs.master_names
- - parsed_outputs.master_ips
- - parsed_outputs.master_floating_ips
+ - '{{ parsed_outputs.master_names }}'
+ - '{{ parsed_outputs.master_ips }}'
+ - '{{ parsed_outputs.master_floating_ips }}'
- name: Add new node instances groups and variables
add_host:
@@ -135,9 +138,9 @@
openshift_node_labels:
type: "compute"
with_together:
- - parsed_outputs.node_names
- - parsed_outputs.node_ips
- - parsed_outputs.node_floating_ips
+ - '{{ parsed_outputs.node_names }}'
+ - '{{ parsed_outputs.node_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
- name: Add new infra instances groups and variables
add_host:
@@ -149,9 +152,9 @@
openshift_node_labels:
type: "infra"
with_together:
- - parsed_outputs.infra_names
- - parsed_outputs.infra_ips
- - parsed_outputs.infra_floating_ips
+ - '{{ parsed_outputs.infra_names }}'
+ - '{{ parsed_outputs.infra_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
- name: Add DNS groups and variables
add_host:
@@ -166,10 +169,10 @@
host: '{{ item }}'
port: 22
with_flattened:
- - parsed_outputs.master_floating_ips
- - parsed_outputs.node_floating_ips
- - parsed_outputs.infra_floating_ips
- - parsed_outputs.dns_floating_ip
+ - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.dns_floating_ip }}'
- name: Wait for user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -178,10 +181,10 @@
retries: 30
delay: 1
with_flattened:
- - parsed_outputs.master_floating_ips
- - parsed_outputs.node_floating_ips
- - parsed_outputs.infra_floating_ips
- - parsed_outputs.dns_floating_ip
+ - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.dns_floating_ip }}'
- include: update.yml
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index ba9c6bf9c..60372e262 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -17,7 +17,7 @@
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+ with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- name: List Hosts
hosts: oo_list_hosts
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index 5bd8476f1..980ab7337 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([]))
+ with_items: "{{ (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([])) }}"
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index bc53a51b0..17063ef34 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -12,6 +12,8 @@ openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_k
default('~/.ssh/id_rsa.pub', True)) }}"
openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
+openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
+ default('0.0.0.0/0', True) }}"
openstack_flavor:
dns: "{{ lookup('oo_option', 'dns_flavor' ) | default('m1.small', True) }}"
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"