From 0c31d72be3bf32f848eedad9859a81ba858f8c8f Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Wed, 1 Jun 2016 15:34:18 -0300
Subject: Docker 1.10 Upgrade

Adds a separate playbook for Docker 1.10 upgrade that can be run standalone
on a pre-existing 3.2 cluster. The upgrade will take each node out of
rotation, and remove *all* containers and images on it, as this is
reportedly faster and more storage efficient than performing the in place
1.10 upgrade.

This process is integrated into the 3.1 to 3.2 upgrade process.

Normal config playbooks now become 3.2 only, and require Docker 1.10. Users
of older environments will have to use an appropriate openshift-ansible
version. Config playbooks no longer are in the business of upgrading or
downgrading docker.
---
 .../upgrades/docker/docker_upgrade.yml            | 105 +++++++++++++++++++++
 .../upgrades/docker/files/nuke_images.sh          |  23 +++++
 .../byo/openshift-cluster/upgrades/docker/roles   |   1 +
 .../openshift-cluster/upgrades/docker/upgrade.yml |  29 ++++++
 .../upgrades/v3_1_to_v3_2/upgrade.yml             |   4 -
 .../upgrades/v3_1_to_v3_2/docker_upgrade.yml      |  15 ---
 .../upgrades/v3_1_to_v3_2/upgrade.yml             |   6 +-
 roles/docker/tasks/main.yml                       |  59 ++++++------
 roles/etcd/tasks/main.yml                         |  12 +--
 roles/openshift_docker_facts/tasks/main.yml       |  11 ---
 roles/openshift_master/tasks/main.yml             |   9 +-
 11 files changed, 199 insertions(+), 75 deletions(-)
 create mode 100644 playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
 create mode 100644 playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
 create mode 120000 playbooks/byo/openshift-cluster/upgrades/docker/roles
 create mode 100644 playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml

diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
new file mode 100644
index 000000000..6c12e8245
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -0,0 +1,105 @@
+
+- name: Check for appropriate Docker versions for 1.9.x to 1.10.x upgrade
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - name: Determine available Docker version
+    script: ../../../../common/openshift-cluster/upgrades/files/rpm_versions.sh docker
+    register: g_docker_version_result
+    when: not openshift.common.is_atomic | bool
+
+  - name: Check if Docker is installed
+    command: rpm -q docker
+    register: pkg_check
+    failed_when: pkg_check.rc > 1
+    changed_when: no
+    when: not openshift.common.is_atomic | bool
+
+  - set_fact:
+      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+    when: not openshift.common.is_atomic | bool
+
+  - name: Set fact if docker requires an upgrade
+    set_fact:
+      docker_upgrade: true
+    when: not openshift.common.is_atomic | bool and pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.10','<')
+
+  - fail:
+      msg: This playbook requires access to Docker 1.10 or later
+    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
+
+# If a node fails, halt everything, the admin will need to clean up and we
+# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
+# and will not take any action on a node already running 1.10+.
+- name: Evacuate and upgrade nodes
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  serial: 1
+  any_errors_fatal: true
+  tasks:
+  - debug: var=docker_upgrade
+
+  - name: Prepare for Node evacuation
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+
+# TODO: skip all node evac stuff for non-nodes (i.e. separate containerized etcd hosts)
+  - name: Evacuate Node for Kubelet upgrade
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+
+  - name: Stop containerized services
+    service: name={{ item }} state=stopped
+    with_items:
+      - "{{ openshift.common.service_type }}-master"
+      - "{{ openshift.common.service_type }}-master-api"
+      - "{{ openshift.common.service_type }}-master-controllers"
+      - "{{ openshift.common.service_type }}-node"
+      - etcd
+      - openvswitch
+    failed_when: false
+    when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+
+  - name: Remove all containers and images
+    script: files/nuke_images.sh docker
+    register: nuke_images_result
+    when: docker_upgrade is defined and docker_upgrade | bool
+
+  - name: Upgrade Docker
+    command: "{{ ansible_pkg_mgr}} update -y docker"
+    register: docker_upgrade_result
+    when: docker_upgrade is defined and docker_upgrade | bool
+
+  - name: Restart containerized services
+    service: name={{ item }} state=started
+    with_items:
+      - etcd
+      - openvswitch
+      - "{{ openshift.common.service_type }}-master"
+      - "{{ openshift.common.service_type }}-master-api"
+      - "{{ openshift.common.service_type }}-master-controllers"
+      - "{{ openshift.common.service_type }}-node"
+    failed_when: false
+    when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+
+  - name: Wait for master API to come back online
+    become: no
+    local_action:
+      module: wait_for
+      host="{{ inventory_hostname }}"
+      state=started
+      delay=10
+      port="{{ openshift.master.api_port }}"
+    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_masters_to_config
+
+  - name: Set node schedulability
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: openshift.node.schedulable | bool
+    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
new file mode 100644
index 000000000..9a5ee2276
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Stop any running containers
+running_container_count=`docker ps -q | wc -l`
+if test $running_container_count -gt 0
+then
+  docker stop $(docker ps -q)
+fi
+
+# Delete all containers
+container_count=`docker ps -a -q | wc -l`
+if test $container_count -gt 0
+then
+  docker rm -f -v $(docker ps -a -q)
+fi
+
+# Delete all images (forcefully)
+image_count=`docker images -q | wc -l`
+if test $image_count -gt 0
+then
+  # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144
+  docker rmi $(docker images | grep "$2/\|/$2 \| $2 \|$2 \|$2-\|$2_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$2\" left to purge."
+fi
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/byo/openshift-cluster/upgrades/docker/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
new file mode 100644
index 000000000..0f86abd89
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -0,0 +1,29 @@
+# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
+#
+# Currently only supports upgrading 1.9.x to >= 1.10.x.
+- hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - include_vars: ../../cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts | default([])
+    changed_when: false
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: ../../cluster_hosts.yml
+
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+  vars:
+    # Do not allow adding hosts during upgrade.
+    g_new_master_hosts: []
+    g_new_node_hosts: []
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+
+- include: docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index 24617620b..8c89e118e 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -4,10 +4,6 @@
   become: no
   gather_facts: no
   tasks:
-  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
-    fail:
-      msg: "Unsupported ansible version: {{ ansible_version }} found."
-    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
   - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
   - add_host:
       name: "{{ item }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
deleted file mode 100644
index 9ade795f2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
+++ /dev/null
@@ -1,15 +0,0 @@
--- name: Check if Docker is installed
-  command: rpm -q docker
-  register: pkg_check
-  failed_when: pkg_check.rc > 1
-  changed_when: no
-
-- name: Upgrade Docker
-  command: "{{ ansible_pkg_mgr}} update -y docker"
-  when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
-  register: docker_upgrade
-
-- name: Restart Docker
-  command: systemctl restart docker
-  when: docker_upgrade | changed
-
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index c93bf2a17..156e80c0f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -3,13 +3,13 @@
 # The restart playbook should be run after this playbook completes.
 ###############################################################################
 
-- name: Upgrade docker
+- include: ../../../../byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+
+- name: Update Docker facts
   hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
   roles:
   - openshift_facts
   tasks:
-  - include: docker_upgrade.yml
-    when: not openshift.common.is_atomic | bool
   - name: Set post docker install facts
     openshift_facts:
       role: "{{ item.role }}"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index b9b2666fb..f002dbc01 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -1,41 +1,42 @@
 ---
-# tasks file for docker
-
-- name: Get current installed version if docker_version is specified
+# Going forward we require Docker 1.10 or greater. If the user has a lesser version installed they must run a separate upgrade process.
+- name: Get current installed Docker version
   command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
-  when: not openshift.common.is_atomic | bool and docker_version != ''
-  register: docker_version_result
+  when: not openshift.common.is_atomic | bool
+  register: curr_docker_version
   changed_when: false
 
-- stat: path=/etc/sysconfig/docker-storage
-  register: docker_storage_check
+# TODO: The use of upgrading var will be removed in the coming upgrade refactor. This is a temporary
+# fix to work around the fact that right now, this role is called during upgrade, before we're
+# ready to upgrade Docker.
+- name: Fail if Docker upgrade is required
+  fail:
+    msg: "Docker {{ curr_docker_version.stdout }} must be upgraded to Docker 1.10 or greater"
+  when: not upgrading | bool and not curr_docker_version | skipped and curr_docker_version.stdout | default('0.0', True) | version_compare('1.10', '<')
 
-- name: Remove deferred deletion for downgrades from 1.9
+- name: Get latest available version of Docker
   command: >
-    sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage
-  when: docker_storage_check.stat.exists | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<')
-
-- name: Downgrade docker if necessary
-  command: "{{ ansible_pkg_mgr }} swap -y docker-* docker-*{{ docker_version }}"
-  register: docker_downgrade_result
-  when: not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'gt')
+    {{ repoquery_cmd }} --qf '%{version}' "docker"
+  register: avail_docker_version
+  failed_when: false
+  changed_when: false
+  when: not openshift.common.is_atomic | bool
 
-- name: Install docker
-  action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present"
-  when: not openshift.common.is_atomic | bool and docker_downgrade_result | skipped
+- name: Verify Docker >= 1.10 is available
+  fail:
+    msg: "Docker {{ avail_docker_version.stdout }} is available, but 1.10 or greater is required"
+  when: not avail_docker_version | skipped and avail_docker_version.stdout | default('0.0', True) | version_compare('1.10', '<')
 
-# If docker were enabled and started before we downgraded it may have entered a
-# failed state. Check for that and clear it if necessary.
-- name: Check that docker hasn't entered failed state
-  command: systemctl show docker
-  register: docker_state
-  changed_when: False
+- stat: path=/etc/sysconfig/docker-storage
+  register: docker_storage_check
 
-- name: Reset docker service state
-  command: systemctl reset-failed docker.service
-  when: " 'ActiveState=failed' in docker_state.stdout "
+# Make sure Docker is installed, but does not update a running version.
+# Docker upgrades are handled by a separate playbook.
+- name: Install Docker
+  action: "{{ ansible_pkg_mgr }} name=docker state=present"
+  when: not openshift.common.is_atomic | bool
 
-- name: Start the docker service
+- name: Start the Docker service
   service:
     name: docker
     enabled: yes
@@ -86,7 +87,7 @@
     notify:
     - restart docker
 
-- name: Set various docker options
+- name: Set various Docker options
   lineinfile:
     dest: /etc/sysconfig/docker
     regexp: '^OPTIONS=.*$'
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 71735dc25..a798dc973 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -28,18 +28,18 @@
     state: directory
     mode: 0700
 
-- name: Check for etcd service presence
-  command: systemctl show etcd.service
-  register: etcd_show
-  changed_when: false
-
 - name: Disable system etcd when containerized
-  when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
+  when: etcd_is_containerized | bool
   service:
     name: etcd
     state: stopped
    enabled: no
 
+- name: Check for etcd service presence
+  command: systemctl show etcd.service
+  register: etcd_show
+  changed_when: false
+
 - name: Mask system etcd when containerized
   when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
   command: systemctl mask etcd
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 43359dcb5..cdea90413 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -57,14 +57,3 @@
       l_common_version: "{{ common_version.stdout | default('0.0', True) }}"
   when: not openshift.common.is_containerized | bool
 
-- name: Set docker version to be installed
-  set_fact:
-    docker_version: "{{ '1.8.2' }}"
-  when: " ( l_common_version | version_compare('3.2','<') and openshift.common.service_type in ['openshift', 'atomic-openshift'] ) or
-          ( l_common_version | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )"
-
-- name: Set docker version to be installed
-  set_fact:
-    docker_version: "{{ '1.9.1' }}"
-  when: " ( l_common_version | version_compare('3.2','>') and openshift.common.service_type == 'atomic-openshift' ) or
-          ( l_common_version | version_compare('1.2','>') and openshift.common.service_type == 'origin' )"
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index f70eaf144..63a54a0d9 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -164,14 +164,9 @@
   register: start_result
   notify: Verify API Server
 
-- name: Check for non-HA master service presence
-  command: systemctl show {{ openshift.common.service_type }}-master.service
-  register: master_svc_show
-  changed_when: false
-
-- name: Stop and disable non-HA master when running HA
+- name: Stop and disable non HA master when running HA
   service: name={{ openshift.common.service_type }}-master enabled=no state=stopped
-  when: openshift_master_ha | bool and 'LoadState=not-found' not in master_svc_show.stdout
+  when: openshift_master_ha | bool
 
 - set_fact:
     master_service_status_changed: "{{ start_result | changed }}"
-- cgit v1.2.1

From c45c5935df97bb4aa9e8e9067d868371b04a24bf Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Mon, 6 Jun 2016 14:58:02 -0300
Subject: Fix Docker 1.10 problems with empty tags and trailing :

Docker 1.10 is no longer tolerant of commands like "docker pull myimage:"
when we do not have an image tag in play. Adjust all occurrences with one
that only includes the : if a version is defined.

Adjust the containerized CLI wrappers for a similar problem.
---
 roles/openshift_cli/tasks/main.yml         |  2 +-
 roles/openshift_cli/templates/openshift.j2 | 10 +++++++---
 roles/openshift_master/tasks/main.yml      |  2 +-
 roles/openshift_node/tasks/main.yml        |  4 ++--
 4 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index bfa60e5b0..c0a712513 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -5,7 +5,7 @@
 
 - name: Pull CLI Image
   command: >
-    docker pull {{ openshift.common.cli_image }}:{{ openshift_version }}
+    docker pull {{ openshift.common.cli_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
   when: openshift.common.is_containerized | bool
 
 - name: Create /usr/local/bin/openshift cli wrapper
diff --git a/roles/openshift_cli/templates/openshift.j2 b/roles/openshift_cli/templates/openshift.j2
index 437e08aab..8a3f3a257 100644
--- a/roles/openshift_cli/templates/openshift.j2
+++ b/roles/openshift_cli/templates/openshift.j2
@@ -5,14 +5,14 @@ fi
 cmd=`basename $0`
 user=`id -u`
 group=`id -g`
-image_tag={{ openshift_version }}
+image_tag="{{ openshift_version }}"
 
 >&2 echo """
 ================================================================================
 ATTENTION: You are running ${cmd} via a wrapper around 'docker run {{ openshift.common.cli_image }}:${image_tag}'.
 This wrapper is intended only to be used to bootstrap an environment. Please
 install client tools on another host once you have granted cluster-admin
-privileges to a user.
+privileges to a user. 
 {% if openshift.common.deployment_type in ['openshift-enterprise','atomic-enterprise'] %}
 See https://docs.openshift.com/enterprise/latest/cli_reference/get_started_cli.html
 {% else %}
@@ -21,4 +21,8 @@ See https://docs.openshift.org/latest/cli_reference/get_started_cli.html
 =================================================================================
 """
 
-docker run -i --privileged --net=host --user=${user}:${group} -v ~/.kube:/root/.kube -v /tmp:/tmp -v {{ openshift.common.config_base}}:{{ openshift.common.config_base }} -e KUBECONFIG=/root/.kube/config --entrypoint ${cmd} --rm {{ openshift.common.cli_image }}:${image_tag} "${@}"
+if [ -n "$image_tag" ]; then
+  image_tag=":$image_tag"
+fi
+
+docker run -i --privileged --net=host --user=${user}:${group} -v ~/.kube:/root/.kube -v /tmp:/tmp -v {{ openshift.common.config_base}}:{{ openshift.common.config_base }} -e KUBECONFIG=/root/.kube/config --entrypoint ${cmd} --rm {{ openshift.common.cli_image }}${image_tag} "${@}"
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 63a54a0d9..e9a9c4251 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -29,7 +29,7 @@
 
 - name: Pull master image
   command: >
-    docker pull {{ openshift.master.master_image }}:{{ openshift_version }}
+    docker pull {{ openshift.master.master_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
   when: openshift.common.is_containerized | bool
 
 - name: Create openshift.common.data_dir
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e8bd13855..657e99e87 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -39,12 +39,12 @@
 
 - name: Pull node image
   command: >
-    docker pull {{ openshift.node.node_image }}:{{ openshift_version }}
+    docker pull {{ openshift.node.node_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
   when: openshift.common.is_containerized | bool
 
 - name: Pull OpenVSwitch image
   command: >
-    docker pull {{ openshift.node.ovs_image }}:{{ openshift_version }}
+    docker pull {{ openshift.node.ovs_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
   when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
 
 - name: Install the systemd units
-- cgit v1.2.1

From 4bec37c10f835902a42b62493c5df0de97db73f8 Mon Sep 17 00:00:00 2001
From: Brenton Leanhardt
Date: Wed, 8 Jun 2016 09:37:19 -0400
Subject: Bug 1338726 - never abort install if the latest version of docker is already installed

---
 roles/docker/tasks/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index f002dbc01..cafbdee4c 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -20,7 +20,7 @@
   register: avail_docker_version
   failed_when: false
   changed_when: false
-  when: not openshift.common.is_atomic | bool
+  when: not curr_docker_version.stdout | default('0.0', True) | version_compare('1.10', '>=') and not openshift.common.is_atomic | bool
 
 - name: Verify Docker >= 1.10 is available
   fail:
-- cgit v1.2.1

From 2d015f6197162da0b851a888cce5813aa056269e Mon Sep 17 00:00:00 2001
From: Pascal Bach
Date: Thu, 9 Jun 2016 10:26:58 +0200
Subject: Add lower case proxy variables

Some applications expect the *_PROXY variables to be lower case.
To support this too inject them in addition to the upper case ones.

Signed-off-by: Pascal Bach
Reviewed-by: Fabio Huser
---
 roles/openshift_builddefaults/vars/main.yml | 6 ++++++
 roles/openshift_master_facts/vars/main.yml  | 6 ++++++
 2 files changed, 12 insertions(+)

diff --git a/roles/openshift_builddefaults/vars/main.yml b/roles/openshift_builddefaults/vars/main.yml
index 9727c73a5..bcdf68112 100644
--- a/roles/openshift_builddefaults/vars/main.yml
+++ b/roles/openshift_builddefaults/vars/main.yml
@@ -13,3 +13,9 @@ builddefaults_yaml:
     value: "{{ openshift.builddefaults.https_proxy | default('', true) }}"
   - name: NO_PROXY
     value: "{{ openshift.builddefaults.no_proxy | default('', true) | join(',') }}"
+  - name: http_proxy
+    value: "{{ openshift.builddefaults.http_proxy | default('', true) }}"
+  - name: https_proxy
+    value: "{{ openshift.builddefaults.https_proxy | default('', true) }}"
+  - name: no_proxy
+    value: "{{ openshift.builddefaults.no_proxy | default('', true) | join(',') }}"
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
index 3b0ee2761..086d8340c 100644
--- a/roles/openshift_master_facts/vars/main.yml
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -11,4 +11,10 @@ builddefaults_yaml:
   - name: HTTPS_PROXY
     value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
   - name: NO_PROXY
+    value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
+  - name: http_proxy
+    value: "{{ openshift.master.builddefaults_http_proxy | default(omit, true) }}"
+  - name: https_proxy
+    value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
+  - name: no_proxy
     value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
\ No newline at end of file
-- cgit v1.2.1

From dcff138a3230a0f23cb627901b2a28cc6bec2e6b Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Thu, 9 Jun 2016 08:37:34 -0300
Subject: Restore mistakenly reverted code.

---
 .../byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml |  4 ++++
 roles/etcd/tasks/main.yml                                   | 12 ++++++------
 roles/openshift_master/tasks/main.yml                       |  9 +++++++--
 3 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index 8c89e118e..24617620b 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -4,6 +4,10 @@
   become: no
   gather_facts: no
   tasks:
+  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
+    fail:
+      msg: "Unsupported ansible version: {{ ansible_version }} found."
+    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
   - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
   - add_host:
       name: "{{ item }}"
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index a798dc973..71735dc25 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -28,18 +28,18 @@
     state: directory
     mode: 0700
 
+- name: Check for etcd service presence
+  command: systemctl show etcd.service
+  register: etcd_show
+  changed_when: false
+
 - name: Disable system etcd when containerized
-  when: etcd_is_containerized | bool
+  when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
   service:
     name: etcd
     state: stopped
     enabled: no
 
-- name: Check for etcd service presence
-  command: systemctl show etcd.service
-  register: etcd_show
-  changed_when: false
-
 - name: Mask system etcd when containerized
   when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
   command: systemctl mask etcd
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index f12371c23..28faee155 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -172,9 +172,14 @@
   register: start_result
   notify: Verify API Server
 
-- name: Stop and disable non HA master when running HA
+- name: Check for non-HA master service presence
+  command: systemctl show {{ openshift.common.service_type }}-master.service
+  register: master_svc_show
+  changed_when: false
+
+- name: Stop and disable non-HA master when running HA
   service: name={{ openshift.common.service_type }}-master enabled=no state=stopped
-  when: openshift_master_ha | bool
+  when: openshift_master_ha | bool and 'LoadState=not-found' not in master_svc_show.stdout
 
 - set_fact:
     master_service_status_changed: "{{ start_result | changed }}"
-- cgit v1.2.1

From 7a2a4d7603ddec410a9f045d2de374317520ced8 Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Thu, 9 Jun 2016 16:31:34 -0400
Subject: Automatic commit of package [openshift-ansible] release [3.3.0-1].

---
 .tito/packages/openshift-ansible |  2 +-
 openshift-ansible.spec           | 20 +++++++++++++++++++-
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 1c588ea84..f7cce4809 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.97-1 ./
+3.3.0-1 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 5089ddaaf..b44f4e28a 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
 }
 
 Name: openshift-ansible
-Version: 3.0.97
+Version: 3.3.0
 Release: 1%{?dist}
 Summary: Openshift and Atomic Enterprise Ansible
 License: ASL 2.0
@@ -205,6 +205,24 @@ Atomic OpenShift Utilities includes
 
 
 %changelog
+* Thu Jun 09 2016 Scott Dodson 3.3.0-1
+- Restore mistakenly reverted code. (dgoodwin@redhat.com)
+- Add openshift_loadbalancer_facts role to set lb facts prior to running
+  dependencies. (abutcher@redhat.com)
+- Bug 1338726 - never abort install if the latest version of docker is already
+  installed (bleanhar@redhat.com)
+- Preserve proxy config if it's undefined (sdodson@redhat.com)
+- At least backup things (sdodson@redhat.com)
+- Use unique play names to make things easier to debug (sdodson@redhat.com)
+- Ansible 2.1 support. (abutcher@redhat.com)
+- add skydns port 8053 to openstack master sec group (jawed.khelil@amadeus.com)
+- fix dns openstack flavor instead of openshift flavor
+  (jawed.khelil@amadeus.com)
+- Fix Docker 1.10 problems with empty tags and trailing : (dgoodwin@redhat.com)
+- ensure htpasswd file exists (tob@butter.sh)
+- Docker 1.10 Upgrade (dgoodwin@redhat.com)
+- Add flag to manage htpasswd, or not. (tob@butter.sh)
+
 * Mon Jun 06 2016 Scott Dodson 3.0.97-1
 - Only run node specific bits on nodes (sdodson@redhat.com)
 - Update main.yaml (detiber@gmail.com)
-- cgit v1.2.1
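
Usage note (not part of the patches above): once this series is applied, the standalone Docker 1.9.x to 1.10.x upgrade introduced in the first patch is driven by the new playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml entry point. A minimal invocation sketch, assuming a standard BYO inventory; the inventory path below is an illustrative assumption, only the playbook path comes from the diffs:

    # Hypothetical invocation; substitute your own inventory file.
    ansible-playbook -i /etc/ansible/hosts \
        playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml

Per the first patch, the playbook aborts early if Docker 1.10 or later is not available from the configured repositories, and it can safely be re-run, since nodes already running 1.10+ are left untouched.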