-rw-r--r--  .tito/packages/openshift-ansible | 2
-rwxr-xr-x  bin/opssh | 1
-rwxr-xr-x  bin/ossh | 5
-rw-r--r--  docs/best_practices_guide.adoc | 67
-rw-r--r--  filter_plugins/openshift_master.py | 28
-rw-r--r--  openshift-ansible.spec | 43
-rw-r--r--  playbooks/adhoc/uninstall.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md | 17
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 14
l---------  playbooks/byo/openshift-master/filter_plugins | 1
l---------  playbooks/byo/openshift-master/lookup_plugins | 1
-rw-r--r--  playbooks/byo/openshift-master/restart.yml | 4
l---------  playbooks/byo/openshift-master/roles | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml | 50
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml | 87
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 137
-rw-r--r--  playbooks/common/openshift-master/config.yml | 1
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 141
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 39
-rw-r--r--  playbooks/common/openshift-master/restart_hosts_pacemaker.yml | 25
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 27
-rw-r--r--  playbooks/common/openshift-master/restart_services_pacemaker.yml | 10
-rw-r--r--  playbooks/common/openshift-node/config.yml | 15
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_master/handlers/main.yml | 13
-rw-r--r--  roles/openshift_master/tasks/main.yml | 15
-rw-r--r--  roles/openshift_node/tasks/main.yml | 15
-rw-r--r--  roles/os_zabbix/tasks/main.yml | 4
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 4
-rw-r--r--  utils/test/cli_installer_tests.py | 42
-rw-r--r--  utils/test/fixture.py | 16
35 files changed, 777 insertions, 57 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 271e86d20..d85882bf9 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.28-1 ./
+3.0.32-1 ./
diff --git a/bin/opssh b/bin/opssh
index 3747bc993..7a2ffdb1d 100755
--- a/bin/opssh
+++ b/bin/opssh
@@ -16,6 +16,7 @@ Options:
-c CLUSTER, --cluster CLUSTER
which cluster to use
-e ENV, --env ENV which environment to use
+  --v3                  When working with v3 environments (v2 by default)
-t HOST_TYPE, --host-type HOST_TYPE
which host type to use
--list-host-types list all of the host types
diff --git a/bin/ossh b/bin/ossh
index 6519e4e08..5e2506638 100755
--- a/bin/ossh
+++ b/bin/ossh
@@ -72,6 +72,8 @@ class Ossh(object):
parser.add_argument('-o', '--ssh_opts', action='store',
help='options to pass to SSH.\n \
"-oForwardX11=yes,TCPKeepAlive=yes"')
+ parser.add_argument('-A', default=False, action="store_true",
+ help='Forward authentication agent')
parser.add_argument('host', nargs='?', default='')
self.args = parser.parse_args()
@@ -177,6 +179,9 @@ class Ossh(object):
if self.user:
ssh_args.append('-l%s' % self.user)
+ if self.args.A:
+ ssh_args.append('-A')
+
if self.args.verbose:
ssh_args.append('-vvv')
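The new flag maps directly onto ssh's own -A option: when present, -A is appended to the argument list built for the underlying ssh invocation, forwarding the authentication agent connection to the remote host. A hypothetical session (the host name is illustrative):

    ossh -A master1.example.com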
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
index 6b744333c..267aa850d 100644
--- a/docs/best_practices_guide.adoc
+++ b/docs/best_practices_guide.adoc
@@ -13,9 +13,12 @@ This guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119].
== Pull Requests
+
+
+[[All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged]]
[cols="2v,v"]
|===
-| **Rule**
+| <<All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged, Rule>>
| All pull requests MUST pass the build bot *before* they are merged.
|===
@@ -30,9 +33,10 @@ The tooling is flexible enough that exceptions can be made so that the tool the
=== Python Source Files
'''
+[[Python-source-files-MUST-contain-the-following-vim-mode-line]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Python-source-files-MUST-contain-the-following-vim-mode-line, Rule>>
| Python source files MUST contain the following vim mode line.
|===
@@ -48,9 +52,10 @@ If mode lines for other editors are needed, please open a GitHub issue.
=== Method Signatures
'''
+[[When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used]]
[cols="2v,v"]
|===
-| **Rule**
+| <<When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>>
| When adding a new parameter to an existing method, a default value SHOULD be used
|===
The purpose of this rule is to make it so that method signatures are backwards compatible.
@@ -74,18 +79,20 @@ def add_person(first_name, last_name, age=None):
http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as managable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request.
'''
+[[PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file]]
[cols="2v,v"]
|===
-| **Rule**
+| <<PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file, Rule>>
| PyLint rules MUST NOT be disabled on a whole file.
|===
Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-particular-message[disable the PyLint check on the line where PyLint is complaining].
'''
+[[PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions]]
[cols="2v,v"]
|===
-| **Rule**
+| <<PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions, Rule>>
| PyLint rules MUST NOT be disabled unless they meet one of the following exceptions
|===
@@ -95,9 +102,10 @@ Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-par
1. When PyLint fails, but the code makes more sense the way it is formatted (stylistic exception). For this exception, the description of the PyLint disable MUST state why the code is more clear, AND the person reviewing the PR will decide if they agree or not. The reviewer may reject the PR if they disagree with the reason for the disable.
'''
+[[All-PyLint-rule-disables-MUST-be-documented-in-the-code]]
[cols="2v,v"]
|===
-| **Rule**
+| <<All-PyLint-rule-disables-MUST-be-documented-in-the-code, Rule>>
| All PyLint rule disables MUST be documented in the code.
|===
@@ -124,9 +132,10 @@ metadata[line] = results.pop()
=== Yaml Files (Playbooks, Roles, Vars, etc)
'''
+[[Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead, Rule>>
| Ansible files SHOULD NOT use JSON (use pure YAML instead).
|===
@@ -144,9 +153,10 @@ Every effort should be made to keep our Ansible YAML files in pure YAML.
=== Modules
'''
+[[Custom-Ansible-modules-SHOULD-be-embedded-in-a-role]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Custom-Ansible-modules-SHOULD-be-embedded-in-a-role, Rule>>
| Custom Ansible modules SHOULD be embedded in a role.
|===
@@ -177,9 +187,10 @@ The purpose of this rule is to make it easy to include custom modules in our pla
'''
+[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed, Rule>>
| Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed
|===
@@ -204,9 +215,10 @@ When a module has several parameters that are being passed in, it's hard to see
'''
+[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters, Rule>>
| Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters
|===
@@ -228,9 +240,10 @@ Lines that are long quickly become a wall of text that isn't easily parsable. It
----
'''
+[[The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module]]
[cols="2v,v"]
|===
-| **Rule**
+| <<The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module, Rule>>
| The Ansible `command` module SHOULD be used instead of the Ansible `shell` module.
|===
.Context
@@ -251,9 +264,10 @@ The Ansible `shell` module can run most commands that can be run from a bash CLI
----
'''
+[[The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module]]
[cols="2v,v"]
|===
-| **Rule**
+| <<The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module, Rule>>
| The Ansible `quote` filter MUST be used with any variable passed into the shell module.
|===
.Context
@@ -279,9 +293,10 @@ It is recommended not to use the `shell` module. However, if it absolutely must
* http://docs.ansible.com/fail_module.html[Ansible Fail Module]
'''
+[[Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>>
| Ansible playbooks MUST begin with checks for any variables that they require.
|===
@@ -299,9 +314,10 @@ If an Ansible playbook requires certain variables to be set, it's best to check
----
'''
+[[Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>>
| Ansible roles tasks/main.yml file MUST begin with checks for any variables that they require.
|===
@@ -318,9 +334,10 @@ If an Ansible role requires certain variables to be set, it's best to check for
=== Tasks
'''
+[[Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks, Rule>>
| Ansible tasks SHOULD NOT be used in ansible playbooks. Instead, use pre_tasks and post_tasks.
|===
An Ansible play is defined as a Yaml dictionary. Because of that, ansible doesn't know if the play's tasks list or roles list was specified first. Therefore Ansible always runs tasks after roles.
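A minimal sketch of the distinction (task names are illustrative): tasks declared under pre_tasks always run before the roles list and post_tasks always run after it, so the ordering is explicit rather than implied.

    - hosts: oo_masters_to_config
      pre_tasks:
      - name: Runs before any role
        debug: msg="before roles"
      roles:
      - openshift_facts
      post_tasks:
      - name: Runs after all roles
        debug: msg="after roles"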
@@ -370,9 +387,10 @@ Therefore, we SHOULD use pre_tasks and post_tasks to make it more clear when the
=== Roles
'''
+[[All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name]]
[cols="2v,v"]
|===
-| **Rule**
+| <<All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name, Rule>>
| All tasks in a role SHOULD be tagged with the role name.
|===
@@ -395,9 +413,10 @@ This is very useful when developing and debugging new tasks. It can also signifi
'''
+[[The-Ansible-roles-directory-MUST-maintain-a-flat-structure]]
[cols="2v,v"]
|===
-| **Rule**
+| <<The-Ansible-roles-directory-MUST-maintain-a-flat-structure, Rule>>
| The Ansible roles directory MUST maintain a flat structure.
|===
@@ -410,9 +429,10 @@ This is very useful when developing and debugging new tasks. It can also signifi
* Make it compatible with Ansible Galaxy
'''
+[[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule>>
| Ansible Roles SHOULD be named like technology_component[_subcomponent].
|===
@@ -430,9 +450,10 @@ Many times the `technology` portion of the pattern will line up with a package n
* http://jinja.pocoo.org/docs/dev/templates/#builtin-filters[Jinja2 Builtin Filters]
'''
+[[The-default-filter-SHOULD-replace-empty-strings-lists-etc]]
[cols="2v,v"]
|===
-| **Rule**
+| <<The-default-filter-SHOULD-replace-empty-strings-lists-etc, Rule>>
| The `default` filter SHOULD replace empty strings, lists, etc.
|===
@@ -469,15 +490,17 @@ This is almost always more desirable than an empty list, string, etc.
=== Yum and DNF
'''
+[[Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum, Rule>>
| Package installation MUST use ansible action module to abstract away dnf/yum.
-| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively.
|===
+
+[[Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively, Rule>>
| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively.
|===
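The abstraction referenced by these two rules is the idiom used later in this commit's upgrade playbooks; a minimal sketch (the package name is illustrative):

    - name: Install a package on either yum or dnf hosts
      action: "{{ ansible_pkg_mgr }} name=httpd state=present"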
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 8d7c62ad1..7ababc51a 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -463,6 +463,32 @@ class FilterModule(object):
IdentityProviderBase.validate_idp_list(idp_list)
return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)
+ @staticmethod
+ def validate_pcs_cluster(data, masters=None):
+ ''' Validates output from "pcs status", ensuring that each master
+ provided is online.
+ Ex: data = ('...',
+ 'PCSD Status:',
+ 'master1.example.com: Online',
+ 'master2.example.com: Online',
+ 'master3.example.com: Online',
+ '...')
+ masters = ['master1.example.com',
+ 'master2.example.com',
+ 'master3.example.com']
+ returns True
+ '''
+ if not issubclass(type(data), str):
+ raise errors.AnsibleFilterError("|failed expects data is a string")
+ if not issubclass(type(masters), list):
+ raise errors.AnsibleFilterError("|failed expects masters is a list")
+ valid = True
+ for master in masters:
+ if "{0}: Online".format(master) not in data:
+ valid = False
+ return valid
+
def filters(self):
''' returns a mapping of filters to methods '''
- return {"translate_idps": self.translate_idps}
+ return {"translate_idps": self.translate_idps,
+ "validate_pcs_cluster": self.validate_pcs_cluster}
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 70938e8d2..c8f6a2673 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.28
+Version: 3.0.32
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -259,6 +259,47 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.32-1
+- Uninstall remove containerized wrapper and symlinks (abutcher@redhat.com)
+
+* Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.31-1
+- Check api prior to starting node. (abutcher@redhat.com)
+- added anchors (twiest@redhat.com)
+
+* Wed Jan 13 2016 Joel Diaz <jdiaz@redhat.com> 3.0.30-1
+- Add -A and detail --v3 flags
+
+* Wed Jan 13 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.29-1
+- 3.1.1 upgrade playbook (bleanhar@redhat.com)
+- Updated help menu for v3 flag (kwoodson@redhat.com)
+- Add wait in between api and controllers start for native ha.
+ (abutcher@redhat.com)
+- atomic-openshift-installer: Error handling for unicode hostnames
+ (smunilla@redhat.com)
+- Update api verification. (abutcher@redhat.com)
+- Add a Verify API Server handler that waits for the API server to become
+ available (sdodson@redhat.com)
+- Add -A parameter to forward ssh agent (jdiaz@redhat.com)
+- Validate pacemaker cluster members. (abutcher@redhat.com)
+- Removed atomic host check (kwoodson@redhat.com)
+- Add is_containerized inputs to nosetests. (abutcher@redhat.com)
+- Add wait for API before starting controllers w/ native ha install.
+ (abutcher@redhat.com)
+- Fix for to_padded_yaml filter (jdetiber@redhat.com)
+- - squashed to one commit (llange@redhat.com)
+- Switch to using hostnamectl as it works on atomic and rhel7
+ (sdodson@redhat.com)
+- Update rolling restart playbook for pacemaker support. Replace fail with a
+ warn and prompt if running ansible from a host that will be rebooted. Re-
+ organize playbooks. (abutcher@redhat.com)
+- Implement simple master rolling restarts. (dgoodwin@redhat.com)
+- re-enable containerize installs (sdodson@redhat.com)
+- Set portal net in master playbook (jdetiber@redhat.com)
+- Set the cli image to match osm_image in openshift_cli role
+ (sdodson@redhat.com)
+- atomic-openshift-installer: Populate new_nodes group (smunilla@redhat.com)
+- Always pull docker images (sdodson@redhat.com)
+
* Mon Jan 11 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.28-1
- added the rhe7-host-monitoring service file (mwoodson@redhat.com)
- Fixing tab completion for latest metadata changes (kwoodson@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ac20f5f9b..36d686c8b 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -202,6 +202,10 @@
- /usr/lib/systemd/system/atomic-openshift-master-controllers.service
- /usr/lib/systemd/system/origin-master-api.service
- /usr/lib/systemd/system/origin-master-controllers.service
+ - /usr/local/bin/openshift
+ - /usr/local/bin/oadm
+ - /usr/local/bin/oc
+ - /usr/local/bin/kubectl
# Since we are potentially removing the systemd unit files for separated
# master-api and master-controllers services, so we need to reload the
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
new file mode 100644
index 000000000..b230835c3
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
@@ -0,0 +1,17 @@
+# v3.1 minor upgrade playbook
+This upgrade will preserve all locally made configuration modifications to the
+Masters and Nodes.
+
+## Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
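+
+The playbook expects a standard BYO inventory defining the masters and nodes
+groups it evaluates; a minimal sketch (host names and variable values are
+illustrative):
+
+    [OSEv3:children]
+    masters
+    nodes
+
+    [OSEv3:vars]
+    ansible_ssh_user=root
+    deployment_type=openshift-enterprise
+
+    [masters]
+    master1.example.com
+
+    [nodes]
+    node1.example.com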
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
new file mode 100644
index 000000000..20fa9b10f
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -0,0 +1,14 @@
+---
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+ g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+ g_node_hosts: "{{ groups.nodes | default([]) }}"
+ g_lb_hosts: "{{ groups.lb | default([]) }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/post.yml
diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/lookup_plugins b/playbooks/byo/openshift-master/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-master/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
new file mode 100644
index 000000000..a78a6aa3d
--- /dev/null
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -0,0 +1,4 @@
+---
+- include: ../../common/openshift-master/restart.yml
+ vars_files:
+ - ../../byo/openshift-cluster/cluster_hosts.yml
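openshift_rolling_restart_mode, validated in the common restart playbook below, selects between restarting services (the default) and rebooting each master; a hypothetical invocation requesting full system restarts:

    ansible-playbook -i ~/ansible-inventory \
        openshift-ansible/playbooks/byo/openshift-master/restart.yml \
        -e openshift_rolling_restart_mode=system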
diff --git a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
new file mode 100644
index 000000000..d8336fcae
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
@@ -0,0 +1,50 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+    registry_image: "{{ openshift.master.registry_url | replace('${component}', 'docker-registry') | replace('${version}', 'v' + g_new_version) }}"
+    router_image: "{{ openshift.master.registry_url | replace('${component}', 'haproxy-router') | replace('${version}', 'v' + g_new_version) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.1.z, existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Check for default router
+ command: >
+ {{ oc_cmd }} get -n default dc/router
+ register: _default_router
+ failed_when: false
+ changed_when: false
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update router image to current version
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+ --api-version=v1
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
new file mode 100644
index 000000000..91780de09
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
@@ -0,0 +1,87 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+    when: openshift_pkg_version is defined and openshift_pkg_version.split('-', 1).1 | version_compare(target_version, '<')
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+
+ - set_fact:
+      g_new_service_name: "{{ 'origin' if deployment_type == 'origin' else 'atomic-openshift' }}"
+
+ - name: Determine available versions
+ script: ../files/versions.sh {{ g_new_service_name }} openshift
+ register: g_versions_result
+
+ - set_fact:
+ g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
new file mode 100644
index 000000000..81dbba1e3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -0,0 +1,137 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master packages and configuration
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade master packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+
+ - name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
+
+# Currently 3.1.1 does not have any new configuration settings
+#
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.0'
+# to_version: '3.1'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade node packages
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles and Cluster Role Bindings
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings
+ hosts: oo_masters_to_config
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Reconcile Cluster Roles
+ command: >
+      {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+      {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 4ecdf2a0c..0df03f194 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -313,6 +313,7 @@
- name: Configure master instances
hosts: oo_masters_to_config
+ any_errors_fatal: true
serial: 1
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
new file mode 100644
index 000000000..fa13a64cb
--- /dev/null
+++ b/playbooks/common/openshift-master/restart.yml
@@ -0,0 +1,141 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Validate configuration for rolling restart
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - fail:
+ msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
+ when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+ - role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
+# Create a temp file on localhost, then check each system that will be
+# rebooted for that file. If it exists, we know we're running ansible on
+# a machine that will be restarted, and we need to warn the user.
+- name: Create temp file on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - local_action: command mktemp
+ register: mktemp
+ changed_when: false
+
+- name: Check if temp file exists on any masters
+ hosts: oo_masters_to_config
+ tasks:
+ - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
+ register: exists
+ changed_when: false
+
+- name: Cleanup temp file on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
+ changed_when: false
+
+- name: Warn if restarting the system where ansible is running
+ hosts: oo_masters_to_config
+ tasks:
+ - pause:
+ prompt: >
+ Warning: Running playbook from a host that will be restarted!
+ Press CTRL+C and A to abort playbook execution. You may
+ continue by pressing ENTER but the playbook will stop
+ executing once this system restarts and services must be
+ manually verified.
+ when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
+ - set_fact:
+ current_host: "{{ exists.stat.exists }}"
+ when: openshift.common.rolling_restart_mode == 'system'
+
+- name: Determine which masters are currently active
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Check master service status
+ command: >
+ systemctl is-active {{ openshift.common.service_type }}-master
+ register: active_check_output
+ when: openshift.master.cluster_method == 'pacemaker'
+ failed_when: active_check_output.stdout not in ['active', 'inactive']
+ changed_when: false
+ - set_fact:
+ is_active: "{{ active_check_output.stdout == 'active' }}"
+ when: openshift.master.cluster_method == 'pacemaker'
+
+- name: Evaluate master groups
+ hosts: localhost
+ become: no
+ tasks:
+ - name: Evaluate oo_active_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_active_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+ when: (hostvars[item]['is_active'] | default(false)) | bool
+ - name: Evaluate oo_current_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_current_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+ when: (hostvars[item]['current_host'] | default(false)) | bool
+
+- name: Validate pacemaker cluster
+ hosts: oo_active_masters
+ tasks:
+ - name: Retrieve pcs status
+ command: pcs status
+ register: pcs_status_output
+ changed_when: false
+ - fail:
+ msg: >
+ Pacemaker cluster validation failed. One or more nodes are not online.
+ when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool
+
+- name: Restart masters
+ hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart active masters
+ hosts: oo_active_masters
+ serial: 1
+ tasks:
+ - include: restart_hosts_pacemaker.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services_pacemaker.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart current masters
+ hosts: oo_current_masters
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
new file mode 100644
index 000000000..ff206f5a2
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -0,0 +1,39 @@
+- name: Restart master system
+ # https://github.com/ansible/ansible/issues/10616
+ shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ become: yes
+# When cluster_method != pacemaker we can ensure the api_port is
+# available.
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master to start
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port=22
+ when: openshift.master.cluster_method == 'pacemaker'
+- name: Wait for master to become available
+ command: pcs status
+ register: pcs_status_output
+ until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool
+ retries: 15
+ delay: 2
+ changed_when: false
+ when: openshift.master.cluster_method == 'pacemaker'
+- fail:
+ msg: >
+      Pacemaker cluster validation failed. {{ inventory_hostname }} is not online.
+ when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool
diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
new file mode 100644
index 000000000..c9219e8de
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
@@ -0,0 +1,25 @@
+- name: Fail over master resource
+ command: >
+ pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ openshift.master.cluster_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+- name: Restart master system
+ # https://github.com/ansible/ansible/issues/10616
+ shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ become: yes
+- name: Wait for master to start
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
new file mode 100644
index 000000000..5e539cd65
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -0,0 +1,27 @@
+- name: Restart master
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: restarted
+ when: not openshift_master_ha | bool
+- name: Restart master API
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: restarted
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Restart master controllers
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: restarted
+    # Ignore errors since it is possible that type != simple for
+ # pre-3.1.1 installations.
+ ignore_errors: true
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml
new file mode 100644
index 000000000..e738f3fb6
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services_pacemaker.yml
@@ -0,0 +1,10 @@
+- name: Restart master services
+ command: pcs resource restart master
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ openshift.master.cluster_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 483a7768c..8d0c4945e 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -215,6 +215,19 @@
| oo_collect('openshift.common.hostname') }}"
openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
pre_tasks:
-
+  # Necessary because when you're on a node that's also a master, the master will be
+  # restarted after the node restarts docker, and it will take up to 60 seconds for
+  # systemd to start the master again.
+ - name: Wait for master API to become available before proceeding
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --head --silent {{ openshift.master.api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift.common.is_containerized | bool
roles:
- openshift_manage_node
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 87fa99a3b..e40a1b329 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -10,6 +10,7 @@
shell: ls /run/ostree-booted
ignore_errors: yes
failed_when: false
+ changed_when: false
register: ostree_output
# Locally setup containerized facts for now
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index e1b95eda4..6b9992eea 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -2,11 +2,24 @@
- name: restart master
service: name={{ openshift.common.service_type }}-master state=restarted
when: (not openshift_master_ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
- name: restart master api
service: name={{ openshift.common.service_type }}-master-api state=restarted
when: (openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
- name: restart master controllers
service: name={{ openshift.common.service_type }}-master-controllers state=restarted
when: (openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --head --silent {{ openshift.master.api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 3b46a0df4..150b76fc8 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -269,6 +269,7 @@
service: name={{ openshift.common.service_type }}-master enabled=yes state=started
when: not openshift_master_ha | bool
register: start_result
+ notify: Verify API Server
- name: Stop and disable non HA master when running HA
service: name={{ openshift.common.service_type }}-master enabled=no state=stopped
@@ -287,6 +288,20 @@
master_api_service_status_changed: "{{ start_result | changed }}"
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+# A separate wait is required here for native HA since notifies will
+# be resolved after all tasks in the role.
+- name: Wait for API to become available
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --head --silent {{ openshift.master.api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool
+
- name: Start and enable master controller
service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 0828d8e2c..9035248f9 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -103,6 +103,21 @@
- name: Additional storage plugin configuration
include: storage_plugins/main.yml
+# Necessary because when you're on a node that's also a master, the master will be
+# restarted after the node restarts docker, and it will take up to 60 seconds for
+# systemd to start the master again.
+- name: Wait for master API to become available before proceeding
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --head --silent {{ openshift_node_master_api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift.common.is_containerized | bool
+
- name: Start and enable node
service: name={{ openshift.common.service_type }}-node enabled=yes state=started
register: start_result
diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml
index a8b65dd56..7552086d4 100644
--- a/roles/os_zabbix/tasks/main.yml
+++ b/roles/os_zabbix/tasks/main.yml
@@ -1,8 +1,4 @@
---
-- fail:
- msg: "Zabbix config is not yet supported on atomic hosts"
- when: openshift.common.is_containerized | bool
-
- name: Main List all templates
zbx_template:
zbx_server: "{{ ozb_server }}"
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 2b6c9deee..4e30929da 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -33,9 +33,7 @@ def is_valid_hostname(hostname):
def validate_prompt_hostname(hostname):
if '' == hostname or is_valid_hostname(hostname):
return hostname
- raise click.BadParameter('"{}" appears to be an invalid hostname. ' \
- 'Please double-check this value i' \
- 'and re-enter it.'.format(hostname))
+ raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
def get_ansible_ssh_user():
click.clear()
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index ea380d565..72e8521d0 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -681,9 +681,9 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', False),
- ('10.0.0.3', False)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
@@ -722,10 +722,10 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', False),
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
],
- add_nodes=[('10.0.0.3', False)],
+ add_nodes=[('10.0.0.3', False, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
@@ -773,9 +773,9 @@ class AttendedCliTests(OOCliFixture):
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
+ ('10.0.0.1', True, False),
],
- add_nodes=[('10.0.0.2', False)],
+ add_nodes=[('10.0.0.2', False, False)],
ssh_user='root',
variant_num=1,
schedulable_masters_ok=True,
@@ -796,10 +796,10 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', True),
- ('10.0.0.3', True),
- ('10.0.0.4', False)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False),
+ ('10.0.0.4', False, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -837,9 +837,9 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', True),
- ('10.0.0.3', True)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -872,10 +872,10 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', True),
- ('10.0.0.3', False),
- ('10.0.0.4', True)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', False, False),
+ ('10.0.0.4', True, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -893,7 +893,7 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True)],
+ ('10.0.0.1', True, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
@@ -921,7 +921,7 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True)],
+ ('10.0.0.1', True, False)],
ssh_user='root',
variant_num=2,
confirm_facts='y')
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 90bd9e1ef..be759578a 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -138,7 +138,7 @@ class OOCliFixture(OOInstallFixture):
self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-#pylint: disable=too-many-arguments,too-many-branches
+#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def build_input(ssh_user=None, hosts=None, variant_num=None,
add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
master_lb=None):
@@ -163,13 +163,19 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
num_masters = 0
if hosts:
i = 0
- for (host, is_master) in hosts:
+ for (host, is_master, is_containerized) in hosts:
inputs.append(host)
if is_master:
inputs.append('y')
num_masters += 1
else:
inputs.append('n')
+
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
+
#inputs.append('rpm')
# We should not be prompted to add more hosts if we're currently at
# 2 masters, this is an invalid HA configuration, so this question
@@ -196,8 +202,12 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
inputs.append('y')
inputs.append('1') # Add more nodes
i = 0
- for (host, is_master) in add_nodes:
+ for (host, is_master, is_containerized) in add_nodes:
inputs.append(host)
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
#inputs.append('rpm')
if i < len(add_nodes) - 1:
inputs.append('y') # Add more hosts