-rw-r--r--  README_openstack.md  1
-rw-r--r--  inventory/README.md  9
-rw-r--r--  inventory/byo/hosts.origin.example  8
-rw-r--r--  inventory/byo/hosts.ose.example  8
-rw-r--r--  inventory/hosts  2
-rw-r--r--  playbooks/adhoc/metrics_setup/README.md  25
-rw-r--r--  playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml  37
-rw-r--r--  playbooks/adhoc/metrics_setup/files/metrics.yaml  116
-rw-r--r--  playbooks/adhoc/metrics_setup/playbooks/install.yml  45
-rw-r--r--  playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml  10
-rw-r--r--  playbooks/adhoc/metrics_setup/playbooks/uninstall.yml  16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml  10
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml  8
l---------  playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml  10
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml  163
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml  24
-rw-r--r--  playbooks/openstack/openshift-cluster/files/user-data  13
-rw-r--r--  roles/flannel/tasks/main.yml  4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  52
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml  10
-rw-r--r--  roles/openshift_node/tasks/main.yml  1
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2  2
-rw-r--r--  utils/setup.py  5
-rw-r--r--  utils/src/data/data_file  1
-rw-r--r--  utils/test/test_utils.py  30
-rw-r--r--  utils/workflows/enterprise_deploy/openshift.sh  2
27 files changed, 274 insertions(+), 339 deletions(-)
diff --git a/README_openstack.md b/README_openstack.md
index 1998a5878..d3d1f9052 100644
--- a/README_openstack.md
+++ b/README_openstack.md
@@ -25,6 +25,7 @@ On Fedora:
On RHEL / CentOS:
```
yum install -y ansible python-novaclient python-neutronclient python-heatclient
+ sudo pip install shade
```
Configuration
diff --git a/inventory/README.md b/inventory/README.md
new file mode 100644
index 000000000..b8edfcbb0
--- /dev/null
+++ b/inventory/README.md
@@ -0,0 +1,9 @@
+# OpenShift Ansible inventory config files
+
+You can install OpenShift on:
+
+* [Amazon Web Services](aws/hosts/)
+* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your bare metal servers
+* [GCE](gce/) (Google Compute Engine)
+* [libvirt](libvirt/hosts/)
+* [OpenStack](openstack/hosts/)
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 13f4c214c..e769537f9 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -472,7 +472,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
-#osm_cluster_network_cidr=10.1.0.0/16
+#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -492,9 +492,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# the CIDRs reserved for external IPs, nodes, pods, or services.
#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-# Configure number of bits to allocate to each host’s subnet e.g. 8
-# would mean a /24 network on the host.
-#osm_host_subnet_length=8
+# Configure number of bits to allocate to each host’s subnet e.g. 9
+# would mean a /23 network on the host.
+#osm_host_subnet_length=9
# Configure master API and console ports.
#openshift_master_api_port=8443
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 2d54dfceb..be919c105 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -472,7 +472,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
-#osm_cluster_network_cidr=10.1.0.0/16
+#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -492,9 +492,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# the CIDRs reserved for external IPs, nodes, pods, or services.
#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-# Configure number of bits to allocate to each host’s subnet e.g. 8
-# would mean a /24 network on the host.
-#osm_host_subnet_length=8
+# Configure number of bits to allocate to each host’s subnet e.g. 9
+# would mean a /23 network on the host.
+#osm_host_subnet_length=9
# Configure master API and console ports.
#openshift_master_api_port=8443
diff --git a/inventory/hosts b/inventory/hosts
deleted file mode 100644
index 72b7ae646..000000000
--- a/inventory/hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now...
-localhost
diff --git a/playbooks/adhoc/metrics_setup/README.md b/playbooks/adhoc/metrics_setup/README.md
deleted file mode 100644
index 71aa1e109..000000000
--- a/playbooks/adhoc/metrics_setup/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-## Playbook for adding [Metrics](https://github.com/openshift/origin-metrics) to Openshift
-
-See OSE Ansible [readme](https://github.com/openshift/openshift-ansible/blob/master/README_OSE.md) for general install instructions. Playbook has been tested on OSE 3.1/RHEL7.2 cluster
-
-
-Add the following vars to `[OSEv3:vars]` section of your inventory file
-```
-[OSEv3:vars]
-# Enable cluster metrics
-use_cluster_metrics=true
-metrics_external_service=< external service name for metrics >
-metrics_image_prefix=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/
-metrics_image_version=3.1.0
-```
-
-Run playbook
-```
-ansible-playbook -i $INVENTORY_FILE playbooks/install.yml
-```
-
-## Contact
-Email: hawkular-dev@lists.jboss.org
-
-## Credits
-Playbook adapted from install shell scripts by Matt Mahoney
diff --git a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml b/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml
deleted file mode 100644
index f70e0b18b..000000000
--- a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "List"
-metadata:
- name: metrics-deployer-setup
- annotations:
- description: "Required dependencies for the metrics deployer pod."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-items:
--
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: metrics-deployer
- secrets:
- - name: metrics-deployer
diff --git a/playbooks/adhoc/metrics_setup/files/metrics.yaml b/playbooks/adhoc/metrics_setup/files/metrics.yaml
deleted file mode 100644
index d823b2587..000000000
--- a/playbooks/adhoc/metrics_setup/files/metrics.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "hawkular/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "0.7.0-SNAPSHOT"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
- name: REDEPLOY
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "1Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
diff --git a/playbooks/adhoc/metrics_setup/playbooks/install.yml b/playbooks/adhoc/metrics_setup/playbooks/install.yml
deleted file mode 100644
index a9ec3c1ef..000000000
--- a/playbooks/adhoc/metrics_setup/playbooks/install.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- include: master_config_facts.yml
-- name: "Install metrics"
- hosts: masters
- vars:
- metrics_public_url: "https://{{ metrics_external_service }}/hawkular/metrics"
- tasks:
- - name: "Add metrics url to master config"
- lineinfile: "state=present dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL' insertbefore='^\ \ publicURL' line='\ \ metricsPublicURL: {{ metrics_public_url }}'"
-
- - name: "Restart master service"
- service: name=atomic-openshift-master state=restarted
-
- - name: "Copy metrics-deployer yaml to remote"
- copy: "src=../files/metrics-deployer-setup.yaml dest=/tmp/metrics-deployer-setup.yaml force=yes"
-
- - name: "Add metrics-deployer"
- command: "{{item}}"
- run_once: true
- register: output
- failed_when: ('already exists' not in output.stderr) and (output.rc != 0)
- with_items:
- - oc project openshift-infra
- - oc create -f /tmp/metrics-deployer-setup.yaml
-
- - name: "Give metrics-deployer SA permissions"
- command: "oadm policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer"
- run_once: true
-
- - name: "Give heapster SA permissions"
- command: "oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster"
- run_once: true
-
- - name: "Create metrics-deployer secret"
- command: "oc secrets new metrics-deployer nothing=/dev/null"
- register: output
- failed_when: ('already exists' not in output.stderr) and (output.rc != 0)
- run_once: true
-
- - name: "Copy metrics.yaml to remote"
- copy: "src=../files/metrics.yaml dest=/tmp/metrics.yaml force=yes"
-
- - name: "Process yml template"
- shell: "oc process -f /tmp/metrics.yaml -v MASTER_URL={{ masterPublicURL }},REDEPLOY=true,HAWKULAR_METRICS_HOSTNAME={{ metrics_external_service }},IMAGE_PREFIX={{ metrics_image_prefix }},IMAGE_VERSION={{ metrics_image_version }},USE_PERSISTENT_STORAGE=false | oc create -f -"
- run_once: true
\ No newline at end of file
diff --git a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml b/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml
deleted file mode 100644
index 65de11bc4..000000000
--- a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: "Load master config"
- hosts: masters
- vars:
- master_config_file: "/tmp/ansible-metrics-{{ ansible_hostname }}"
- tasks:
- - name: "Fetch master config from remote"
- fetch: "src=/etc/origin/master/master-config.yaml dest={{ master_config_file }} flat=yes"
- - name: "Load config"
- include_vars: "{{ master_config_file }}"
diff --git a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml b/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml
deleted file mode 100644
index 06c4586ee..000000000
--- a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: "Uninstall metrics"
- hosts: masters
- tasks:
- - name: "Remove metrics url from master config"
- lineinfile: "state=absent dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL'"
-
- - name: "Delete metrics objects"
- command: "{{item}}"
- with_items:
- - oc delete all --selector=metrics-infra
- # - oc delete secrets --selector=metrics-infra
- # - oc delete sa --selector=metrics-infra
- - oc delete templates --selector=metrics-infra
- - oc delete sa metrics-deployer
- - oc delete secret metrics-deployer
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 834461e14..381e3ed8f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,6 +1,6 @@
- name: Check for appropriate Docker versions
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
roles:
- openshift_facts
tasks:
@@ -19,19 +19,19 @@
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
# and will not take any action on a node already running the requested docker version.
- name: Evacuate and upgrade nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- name: Prepare for Node evacuation
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- name: Evacuate Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
@@ -40,7 +40,7 @@
- name: Set node schedulability
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift.node.schedulable | bool
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5f008a045..74147fe01 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -212,7 +212,7 @@
- name: Determine if node is currently scheduleable
command: >
{{ openshift.common.client_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- get node {{ openshift.node.nodename }} -o json
+ get node {{ openshift.common.hostname | lower }} -o json
register: node_output
when: openshift_certificates_redeploy_ca | default(false) | bool
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -225,7 +225,7 @@
- name: Prepare for node evacuation
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.node.nodename }}
+ manage-node {{ openshift.common.hostname | lower }}
--schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -233,7 +233,7 @@
- name: Evacuate node
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.node.nodename }}
+ manage-node {{ openshift.common.hostname | lower }}
--evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -241,7 +241,7 @@
- name: Set node schedulability
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.node.nodename }} --schedulable=true
+ manage-node {{ openshift.common.hostname | lower }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
new file mode 120000
index 000000000..514526fe2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openvswitch-avoid-oom.conf
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 1f314c854..e66344f99 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -17,7 +17,7 @@
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Determine if node is currently scheduleable
command: >
- {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+ {{ openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} -o json
register: node_output
delegate_to: "{{ groups.oo_first_master.0 }}"
changed_when: false
@@ -29,7 +29,7 @@
- name: Mark unschedulable if host is a node
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
# NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -41,7 +41,7 @@
- name: Evacuate Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
tasks:
@@ -64,12 +64,10 @@
- name: Set node schedulability
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
register: node_sched
until: node_sched.rc == 0
retries: 3
delay: 1
-
-
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 20ce47c07..458cf5ac7 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -45,7 +45,7 @@ parameters:
node_port_incoming:
type: string
label: Source of node port connections
- description: Authorized sources targeting node ports
+ description: Authorized sources targetting node ports
default: 0.0.0.0/0
num_etcd:
@@ -88,6 +88,11 @@ parameters:
label: Infra image
description: Name of the image for the infra node servers
+ dns_image:
+ type: string
+ label: DNS image
+ description: Name of the image for the DNS server
+
etcd_flavor:
type: string
label: Etcd flavor
@@ -108,6 +113,11 @@ parameters:
label: Infra flavor
description: Flavor of the infra node servers
+ dns_flavor:
+ type: string
+ label: DNS flavor
+ description: Flavor of the DNS server
+
outputs:
etcd_names:
@@ -158,6 +168,26 @@ outputs:
description: Floating IPs of the nodes
value: { get_attr: [ infra_nodes, floating_ip ] }
+ dns_name:
+ description: Name of the DNS
+ value:
+ get_attr:
+ - dns
+ - name
+
+ dns_floating_ip:
+ description: Floating IP of the DNS
+ value:
+ get_attr:
+ - dns
+ - addresses
+ - str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+ - 1
+ - addr
+
resources:
net:
@@ -183,7 +213,22 @@ resources:
template: subnet_24_prefix.0/24
params:
subnet_24_prefix: { get_param: subnet_24_prefix }
- dns_nameservers: { get_param: dns_nameservers }
+ allocation_pools:
+ - start:
+ str_replace:
+ template: subnet_24_prefix.3
+ params:
+ subnet_24_prefix: { get_param: subnet_24_prefix }
+ end:
+ str_replace:
+ template: subnet_24_prefix.254
+ params:
+ subnet_24_prefix: { get_param: subnet_24_prefix }
+ dns_nameservers:
+ - str_replace:
+ template: subnet_24_prefix.2
+ params:
+ subnet_24_prefix: { get_param: subnet_24_prefix }
router:
type: OS::Neutron::Router
@@ -383,6 +428,44 @@ resources:
port_range_min: 443
port_range_max: 443
+ dns-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-dns-secgrp
+ params:
+ cluster_id: { get_param: cluster_id }
+ description:
+ str_replace:
+ template: Security group for cluster_id cluster DNS
+ params:
+ cluster_id: { get_param: cluster_id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: { get_param: ssh_incoming }
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: etcd-secgrp }
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: master-secgrp }
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: node-secgrp }
+
etcd:
type: OS::Heat::ResourceGroup
properties:
@@ -516,3 +599,79 @@ resources:
cluster_id: { get_param: cluster_id }
depends_on:
- interface
+
+ dns:
+ type: OS::Nova::Server
+ properties:
+ name:
+ str_replace:
+ template: cluster_id-dns
+ params:
+ cluster_id: { get_param: cluster_id }
+ key_name: { get_resource: keypair }
+ image: { get_param: dns_image }
+ flavor: { get_param: dns_flavor }
+ networks:
+ - port: { get_resource: dns-port }
+ user_data: { get_resource: dns-config }
+ user_data_format: RAW
+
+ dns-port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: net }
+ fixed_ips:
+ - subnet: { get_resource: subnet }
+ ip_address:
+ str_replace:
+ template: subnet_24_prefix.2
+ params:
+ subnet_24_prefix: { get_param: subnet_24_prefix }
+ security_groups:
+ - { get_resource: dns-secgrp }
+
+ dns-floating-ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: external_net }
+ port_id: { get_resource: dns-port }
+
+ dns-config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config:
+ str_replace:
+ template: |
+ #cloud-config
+ disable_root: true
+
+ system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: 440
+ content: |
+ Defaults:openshift !requiretty
+ - path: /etc/sysconfig/network-scripts/ifcfg-eth0
+ content: |
+ DEVICE="eth0"
+ BOOTPROTO="dhcp"
+ DNS1="$dns1"
+ DNS2="$dns2"
+ PEERDNS="no"
+ ONBOOT="yes"
+ runcmd:
+ - [ "/usr/bin/systemctl", "restart", "network" ]
+ params:
+ $dns1:
+ get_param:
+ - dns_nameservers
+ - 0
+ $dns2:
+ get_param:
+ - dns_nameservers
+ - 1
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
index 435139849..f83f2c984 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
@@ -107,7 +107,7 @@ resources:
flavor: { get_param: flavor }
networks:
- port: { get_resource: port }
- user_data: { get_resource: config }
+ user_data: { get_file: user-data }
user_data_format: RAW
metadata:
environment: { get_param: cluster_env }
@@ -128,25 +128,3 @@ resources:
properties:
floating_network: { get_param: floating_network }
port_id: { get_resource: port }
-
- config:
- type: OS::Heat::CloudConfig
- properties:
- cloud_config:
- disable_root: true
-
- hostname: { get_param: name }
-
- system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
- write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- # content: Defaults:openshift !requiretty
- # Encoded in base64 to be sure that we do not forget the trailing newline or
- # sudo will not be able to parse that file
- encoding: b64
- content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==
diff --git a/playbooks/openstack/openshift-cluster/files/user-data b/playbooks/openstack/openshift-cluster/files/user-data
new file mode 100644
index 000000000..eb65f7cec
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/files/user-data
@@ -0,0 +1,13 @@
+#cloud-config
+disable_root: true
+
+system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: 440
+ content: |
+ Defaults:openshift !requiretty
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index f5b16fb76..bf400cfe8 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,7 +2,7 @@
- name: Install flannel
become: yes
action: "{{ ansible_pkg_mgr }} name=flannel state=present"
- when: not openshift.common.is_containerized | bool
+ when: not openshift.common.is_atomic | bool
- name: Set flannel etcd options
become: yes
@@ -15,7 +15,7 @@
- { regexp: "^(FLANNEL_ETCD=)", line: '\1{{ etcd_hosts|join(",") }}' }
- { regexp: "^(FLANNEL_ETCD_ENDPOINTS=)", line: '\1{{ etcd_hosts|join(",") }}' }
- { regexp: "^(FLANNEL_ETCD_KEY=)", line: '\1{{ flannel_etcd_key }}' }
- - { regexp: "^(FLANNEL_ETCD_KEY_PREFIX=)", line: '\1{{ flannel_etcd_key }}' }
+ - { regexp: "^(FLANNEL_ETCD_PREFIX=)", line: '\1{{ flannel_etcd_key }}' }
- name: Set flannel options
become: yes
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 95325610d..cd6b6456b 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -148,7 +148,6 @@ def hostname_valid(hostname):
if (not hostname or
hostname.startswith('localhost') or
hostname.endswith('localdomain') or
- hostname.endswith('novalocal') or
len(hostname.split('.')) < 2):
return False
@@ -363,15 +362,12 @@ def normalize_openstack_facts(metadata, facts):
facts['network']['ip'] = local_ipv4
facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
- for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
- ('public_hostname', 'public-hostname', 'public-ipv4')]:
- try:
- if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
- facts['network'][f_var] = metadata['ec2_compat'][h_var]
- else:
- facts['network'][f_var] = metadata['ec2_compat'][ip_var]
- except socket.gaierror:
- facts['network'][f_var] = metadata['ec2_compat'][ip_var]
+ # TODO: verify local hostname makes sense and is resolvable
+ facts['network']['hostname'] = metadata['hostname']
+
+ # TODO: verify that public hostname makes sense and is resolvable
+ pub_h = metadata['ec2_compat']['public-hostname']
+ facts['network']['public_hostname'] = pub_h
return facts
@@ -901,10 +897,29 @@ def set_sdn_facts_if_unset(facts, system_facts):
facts['common']['sdn_network_plugin_name'] = plugin
if 'master' in facts:
+ # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
+ # these might be overridden if they exist in the master config file
+ sdn_cluster_network_cidr = '10.128.0.0/14'
+ sdn_host_subnet_length = '9'
+
+ master_cfg_path = os.path.join(facts['common']['config_base'],
+ 'master/master-config.yaml')
+ if os.path.isfile(master_cfg_path):
+ with open(master_cfg_path, 'r') as master_cfg_f:
+ config = yaml.safe_load(master_cfg_f.read())
+
+ if 'networkConfig' in config:
+ if 'clusterNetworkCIDR' in config['networkConfig']:
+ sdn_cluster_network_cidr = \
+ config['networkConfig']['clusterNetworkCIDR']
+ if 'hostSubnetLength' in config['networkConfig']:
+ sdn_host_subnet_length = \
+ config['networkConfig']['hostSubnetLength']
+
if 'sdn_cluster_network_cidr' not in facts['master']:
- facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16'
+ facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
if 'sdn_host_subnet_length' not in facts['master']:
- facts['master']['sdn_host_subnet_length'] = '8'
+ facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length
if 'node' in facts and 'sdn_mtu' not in facts['node']:
node_ip = facts['common']['ip']
@@ -921,14 +936,6 @@ def set_sdn_facts_if_unset(facts, system_facts):
return facts
-def set_nodename(facts):
- if 'node' in facts and 'common' in facts:
- if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
- facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
- else:
- facts['node']['nodename'] = facts['common']['hostname'].lower()
- return facts
-
def migrate_oauth_template_facts(facts):
"""
Migrate an old oauth template fact to a newer format if it's present.
@@ -1305,7 +1312,7 @@ def apply_provider_facts(facts, provider_facts):
facts['common'][h_var] = choose_hostname(
[provider_facts['network'].get(h_var)],
- facts['common'][h_var]
+ facts['common'][ip_var]
)
facts['provider'] = provider_facts
@@ -1774,8 +1781,8 @@ class OpenShiftFacts(object):
facts = set_node_schedulability(facts)
facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
- facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_deployment_facts_if_unset(facts)
+ facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_container_facts_if_unset(facts)
facts = build_kubelet_args(facts)
facts = build_controller_args(facts)
@@ -1788,7 +1795,6 @@ class OpenShiftFacts(object):
facts = set_proxy_facts(facts)
if not safe_get_bool(facts['common']['is_containerized']):
facts = set_installed_variant_rpm_facts(facts)
- facts = set_nodename(facts)
return dict(openshift=facts)
def get_defaults(self, roles, deployment_type, deployment_subtype):
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index 28e4e46e9..a1f42f8c4 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -14,7 +14,7 @@
- name: Wait for Node Registration
command: >
- {{ openshift.common.client_binary }} get node {{ hostvars[item].openshift.node.nodename }}
+ {{ openshift.common.client_binary }} get node {{ hostvars[item].openshift.common.hostname }}
--config={{ openshift_manage_node_kubeconfig }}
-n default
register: omd_get_node
@@ -26,19 +26,19 @@
- name: Set node schedulability
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }}
+ {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.common.hostname | lower }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }}
--config={{ openshift_manage_node_kubeconfig }}
-n default
with_items: "{{ openshift_nodes }}"
- when: hostvars[item].openshift.node.nodename is defined
+ when: hostvars[item].openshift.common.hostname is defined
- name: Label nodes
command: >
- {{ openshift.common.client_binary }} label --overwrite node {{ hostvars[item].openshift.node.nodename }} {{ hostvars[item].openshift.node.labels | oo_combine_dict }}
+ {{ openshift.common.client_binary }} label --overwrite node {{ hostvars[item].openshift.common.hostname | lower }} {{ hostvars[item].openshift.node.labels | oo_combine_dict }}
--config={{ openshift_manage_node_kubeconfig }}
-n default
with_items: "{{ openshift_nodes }}"
- when: hostvars[item].openshift.node.nodename is defined and 'labels' in hostvars[item].openshift.node and hostvars[item].openshift.node.labels != {}
+ when: hostvars[item].openshift.common.hostname is defined and 'labels' in hostvars[item].openshift.node and hostvars[item].openshift.node.labels != {}
- name: Delete temp directory
file:
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 8b3145785..474df497e 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -100,6 +100,7 @@
line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
- regex: '^AWS_SECRET_ACCESS_KEY='
line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
+ no_log: True
when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
notify:
- restart node
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 9bcaf4d84..68d153052 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -33,7 +33,7 @@ networkConfig:
{% if openshift.node.set_node_ip | bool %}
nodeIP: {{ openshift.common.ip }}
{% endif %}
-nodeName: {{ openshift.node.nodename }}
+nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
bindAddress: 0.0.0.0:10250
diff --git a/utils/setup.py b/utils/setup.py
index 563897bb1..7909321c9 100644
--- a/utils/setup.py
+++ b/utils/setup.py
@@ -65,11 +65,6 @@ setup(
'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
},
- # Although 'package_data' is the preferred approach, in some case you may
- # need to place data files outside of your packages. See:
- # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
- # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
- #data_files=[('my_data', ['data/data_file'])],
tests_require=['nose'],
test_suite='nose.collector',
diff --git a/utils/src/data/data_file b/utils/src/data/data_file
deleted file mode 100644
index 7c0646bfd..000000000
--- a/utils/src/data/data_file
+++ /dev/null
@@ -1 +0,0 @@
-some data
\ No newline at end of file
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
index 8d59f388e..2e59d86f2 100644
--- a/utils/test/test_utils.py
+++ b/utils/test/test_utils.py
@@ -6,7 +6,7 @@ import unittest
import logging
import sys
import copy
-from ooinstall.utils import debug_env
+from ooinstall.utils import debug_env, is_valid_hostname
import mock
@@ -70,3 +70,31 @@ class TestUtils(unittest.TestCase):
self.assertItemsEqual(
self.expected,
_il.debug.call_args_list)
+
+ ######################################################################
+ def test_utils_is_valid_hostname_invalid(self):
+ """Verify is_valid_hostname can detect None or too-long hostnames"""
+ # A hostname that's empty, None, or more than 255 chars is invalid
+ empty_hostname = ''
+ res = is_valid_hostname(empty_hostname)
+ self.assertFalse(res)
+
+ none_hostname = None
+ res = is_valid_hostname(none_hostname)
+ self.assertFalse(res)
+
+ too_long_hostname = "a" * 256
+ res = is_valid_hostname(too_long_hostname)
+ self.assertFalse(res)
+
+ def test_utils_is_valid_hostname_ends_with_dot(self):
+ """Verify is_valid_hostname can parse hostnames with trailing periods"""
+ hostname = "foo.example.com."
+ res = is_valid_hostname(hostname)
+ self.assertTrue(res)
+
+ def test_utils_is_valid_hostname_normal_hostname(self):
+ """Verify is_valid_hostname can parse regular hostnames"""
+ hostname = "foo.example.com"
+ res = is_valid_hostname(hostname)
+ self.assertTrue(res)
diff --git a/utils/workflows/enterprise_deploy/openshift.sh b/utils/workflows/enterprise_deploy/openshift.sh
deleted file mode 100644
index 040a9a84d..000000000
--- a/utils/workflows/enterprise_deploy/openshift.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-# This file is not used for OpenShift 3.0. It's merely an artifact of the the
-# installation framework originally used for OpenShift 2.x.