-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  docs/proposals/role_decomposition.md | 6
-rw-r--r--  openshift-ansible.spec | 53
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-etcd/embedded2external.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 6
-rw-r--r--  playbooks/common/openshift-node/additional_config.yml | 14
-rw-r--r--  roles/ansible_service_broker/defaults/main.yml | 5
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 48
-rw-r--r--  roles/ansible_service_broker/vars/default_images.yml | 2
-rw-r--r--  roles/ansible_service_broker/vars/openshift-enterprise.yml | 5
-rw-r--r--  roles/docker/tasks/main.yml | 9
-rw-r--r--  roles/docker/tasks/package_docker.yml | 8
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 34
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 36
-rw-r--r--  roles/etcd/tasks/system_container.yml | 5
-rw-r--r--  roles/kuryr/README.md | 38
-rw-r--r--  roles/kuryr/defaults/main.yaml | 72
-rw-r--r--  roles/kuryr/meta/main.yml | 17
-rw-r--r--  roles/kuryr/tasks/master.yaml | 52
-rw-r--r--  roles/kuryr/tasks/node.yaml | 48
-rw-r--r--  roles/kuryr/tasks/serviceaccount.yaml | 31
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 53
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 343
-rw-r--r--  roles/kuryr/templates/controller-deployment.yaml.j2 | 40
-rw-r--r--  roles/openshift_atomic/README.md | 28
-rw-r--r--  roles/openshift_atomic/meta/main.yml | 13
-rw-r--r--  roles/openshift_atomic/tasks/proxy.yml | 32
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 13
-rw-r--r--  roles/openshift_logging/README.md | 18
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 36
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 19
-rw-r--r--  roles/openshift_logging/filter_plugins/test | 34
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 9
-rw-r--r--  roles/openshift_logging_curator/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 15
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 11
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 7
-rw-r--r--  roles/openshift_logging_eventrouter/README.md | 6
-rw-r--r--  roles/openshift_logging_eventrouter/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 6
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 3
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 23
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 38
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 13
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 19
-rw-r--r--  roles/openshift_master/defaults/main.yml | 3
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 5
-rw-r--r--  roles/openshift_master/tasks/upgrade_facts.yml | 4
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 5
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 5
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 16
-rw-r--r--  roles/openshift_node_dnsmasq/README.md | 27
-rw-r--r--  roles/openshift_node_dnsmasq/defaults/main.yml | 1
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/network-manager.yml | 1
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 13
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/unsupported.yml | 8
71 files changed, 1230 insertions, 181 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index cbfcd1c87..8dde380f1 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.166.0 ./
+3.7.0-0.176.0 ./
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index b6c1d8c5b..6434e24e7 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -158,13 +158,13 @@ providing the location of the generated certificates to the individual roles.
openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"
openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
- openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
+ openshift_logging_kibana_cpu_request: "{{ openshift_logging_kibana_ops_cpu_request }}"
openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
- openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
+ openshift_logging_kibana_proxy_cpu_request: "{{ openshift_logging_kibana_ops_proxy_cpu_request }}"
openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}"
@@ -193,8 +193,8 @@ providing the location of the generated certificates to the individual roles.
openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
- openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
+ openshift_logging_curator_cpu_request: "{{ openshift_logging_curator_ops_cpu_request }}"
openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
when:
- openshift_logging_use_ops | bool
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 399b3a129..6866f9a4f 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.166.0%{?dist}
+Release: 0.176.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -280,6 +280,57 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.176.0
+- Update defaults (fabian@fabianism.us)
+- Use service-ca.crt instead of master ca.crt (fabian@fabianism.us)
+- use master cert (fabian@fabianism.us)
+- Bug 1496426 - add asb-client secret to openshift-ansible-service-broker
+ namespace (fabian@fabianism.us)
+- docker: Move enterprise registry from pkg to main (smilner@redhat.com)
+- systemcontainers: Verify atomic.conf proxy is always configured
+ (smilner@redhat.com)
+- Add variable to control whether NetworkManager hook is installed
+ (hansmi@vshn.ch)
+
+* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.175.0
+-
+
+* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.174.0
+-
+
+* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.173.0
+-
+
+* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.172.0
+-
+
+* Sat Oct 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.171.0
+- Use "requests" for CPU resources instead of limits
+ (peter.portante@redhat.com)
+- [bz1501271] Attempt to use ami ssh user and default to ansible_ssh_user.
+ (kwoodson@redhat.com)
+- Fix undefined variable for master upgrades (mgugino@redhat.com)
+- Adding pre check to verify clusterid is set along with cloudprovider when
+ performing upgrade. (kwoodson@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.170.0
+- Check for container runtime prior to restarting when updating system CA
+ trust. (abutcher@redhat.com)
+- bug 1489498. preserve replica and shard settings (jcantril@redhat.com)
+- Set servingInfo.clientCA to ca.crt during upgrade. (abutcher@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.169.0
+- Initial Kuryr support (mdulko@redhat.com)
+- Indentation errors (dymurray@redhat.com)
+- Bug 1503233 - Add liveness and readiness probe checks to ASB deploymentconfig
+ (dymurray@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.168.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.167.0
+-
+
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.166.0
-
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index ee281929a..5b4a6a1e8 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -26,7 +26,7 @@
tasks:
- name: set the user to perform installation
set_fact:
- ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}"
+ ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}"
openshift_node_bootstrap: True
# This is the part that installs all of the software and configs for the instance
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
index 52458e03c..db0c8f886 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -8,3 +8,8 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
index c26e8f744..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -13,3 +13,8 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
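
The two `modify_yaml` hooks above (for the v3_6 and v3_7 upgrade paths) set `servingInfo.clientCA` back to `ca.crt`. As an illustrative sketch (the config path assumes an Origin deployment where `openshift.common.config_base` is `/etc/origin`), the touched fragment of `master-config.yaml` would end up as:

```
# /etc/origin/master/master-config.yaml (fragment, illustrative)
servingInfo:
  clientCA: ca.crt                # set by the modify_yaml task above
controllerConfig:
  serviceServingCert:
    signer:
      keyFile: service-signer.key # set by the earlier modify_yaml task
```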
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index bf3b94682..81f6dc8a4 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -125,7 +125,7 @@
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
- hosts: oo_etcd_to_config
+ hosts: oo_masters_to_config
gather_facts: no
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml
index 9264f3c32..b16b78c4f 100644
--- a/playbooks/common/openshift-etcd/embedded2external.yml
+++ b/playbooks/common/openshift-etcd/embedded2external.yml
@@ -158,7 +158,7 @@
tasks_from: configure_external_etcd
vars:
etcd_peer_url_scheme: "https"
- etcd_ip: "{{ openshift.common.ip }}"
+ etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
etcd_peer_port: 2379
# 9. start the master
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 6e57f282e..b359919ba 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -206,6 +206,12 @@
when: openshift_use_nuage | default(false) | bool
- role: calico_master
when: openshift_use_calico | default(false) | bool
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: master
+ when: openshift_use_kuryr | default(false) | bool
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml
index fe51ef833..ac757397b 100644
--- a/playbooks/common/openshift-node/additional_config.yml
+++ b/playbooks/common/openshift-node/additional_config.yml
@@ -19,10 +19,14 @@
- group_by:
key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }}
changed_when: False
+ # Create group for kuryr nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
+ changed_when: False
- include: etcd_client_config.yml
vars:
- openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv"
+ openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr"
- name: Additional node config
hosts: oo_nodes_use_flannel
@@ -50,3 +54,11 @@
- role: contiv
contiv_role: netplugin
when: openshift_use_contiv | default(false) | bool
+
+- name: Configure Kuryr node
+ hosts: oo_nodes_use_kuryr
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: node
+ when: openshift_use_kuryr | default(false) | bool
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index fa982d533..dc05b03b5 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -13,7 +13,4 @@ ansible_service_broker_launch_apb_on_bind: false
ansible_service_broker_image_pull_policy: IfNotPresent
ansible_service_broker_sandbox_role: edit
-ansible_service_broker_auto_escalate: true
-ansible_service_broker_registry_tag: latest
-ansible_service_broker_registry_whitelist:
- - '.*-apb$'
+ansible_service_broker_auto_escalate: false
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index c0384b7c4..66c3d9cc4 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -22,23 +22,14 @@
ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
-
- ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/service-catalog"
+ ansible_service_broker_registry_tag: "{{ ansible_service_broker_registry_tag | default(__ansible_service_broker_registry_tag) }}"
+ ansible_service_broker_registry_whitelist: "{{ ansible_service_broker_registry_whitelist | default(__ansible_service_broker_registry_whitelist) }}"
- name: set ansible-service-broker image facts using set prefix and tag
set_fact:
ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
-- set_fact:
- openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
- when: openshift_master_config_dir is undefined
-
-- slurp:
- src: "{{ openshift_master_config_dir }}/service-signer.crt"
- register: catalog_ca
-
-
- include: validate_facts.yml
@@ -83,13 +74,12 @@
state: present
name: asb-access
rules:
- - nonResourceURLs: ["/ansible-service-broker", "ansible-service-broker/*"]
+ - nonResourceURLs: ["/ansible-service-broker", "/ansible-service-broker/*"]
verbs: ["get", "post", "put", "patch", "delete"]
- name: Bind admin cluster-role to asb serviceaccount
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: admin
user: "system:serviceaccount:openshift-ansible-service-broker:asb"
@@ -97,7 +87,6 @@
- name: Bind auth cluster role to asb service account
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: asb-auth
user: "system:serviceaccount:openshift-ansible-service-broker:asb"
@@ -105,7 +94,6 @@
- name: Bind asb-access role to asb-client service account
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: asb-access
user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
@@ -113,6 +101,7 @@
- name: create asb-client token secret
oc_obj:
name: asb-client
+ namespace: openshift-ansible-service-broker
state: present
kind: Secret
content:
@@ -122,10 +111,20 @@
kind: Secret
metadata:
name: asb-client
+ namespace: openshift-ansible-service-broker
annotations:
kubernetes.io/service-account.name: asb-client
type: kubernetes.io/service-account-token
+- oc_secret:
+ state: list
+ namespace: openshift-ansible-service-broker
+ name: asb-client
+ register: asb_client_secret
+
+- set_fact:
+ service_ca_crt: asb_client_secret.results.results.0.data['service-ca.crt']
+
# Using oc_obj because oc_service doesn't seem to allow annotations
# TODO: Extend oc_service to allow annotations
- name: create ansible-service-broker service
@@ -141,6 +140,7 @@
kind: Service
metadata:
name: asb
+ namespace: openshift-ansible-service-broker
labels:
app: openshift-ansible-service-broker
service: asb
@@ -235,6 +235,20 @@
value: /etc/ansible-service-broker/config.yaml
resources: {}
terminationMessagePath: /tmp/termination-log
+ readinessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ livenessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
- image: "{{ ansible_service_broker_etcd_image }}"
name: etcd
@@ -340,11 +354,11 @@
metadata:
name: ansible-service-broker
spec:
- url: http://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
+ url: https://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
authInfo:
bearer:
secretRef:
name: asb-client
namespace: openshift-ansible-service-broker
kind: Secret
- caBundle: "{{ catalog_ca.content }}"
+ caBundle: "{{ service_ca_crt }}"
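
One caveat with the new `set_fact` above: as written, `service_ca_crt` is assigned the literal string rather than the secret data, because the expression is not wrapped in Jinja delimiters. A sketch of the intended lookup (same registered variable, same key) would be:

```
# Sketch only: assumes the oc_secret task above registered `asb_client_secret`;
# the value is the base64-encoded service-ca.crt from the asb-client
# service-account token secret, which is what caBundle expects.
- set_fact:
    service_ca_crt: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] }}"
```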
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 3e9639adf..8438e993f 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -13,3 +13,5 @@ __ansible_service_broker_registry_url: null
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: latest
+__ansible_service_broker_registry_whitelist: []
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index 9c576cb76..fc58b4fd8 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -1,7 +1,7 @@
---
__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
-__ansible_service_broker_image_tag: v3.6
+__ansible_service_broker_image_tag: v3.7
__ansible_service_broker_etcd_image_prefix: rhel7/
__ansible_service_broker_etcd_image_tag: latest
@@ -14,3 +14,6 @@ __ansible_service_broker_registry_url: "https://registry.access.redhat.com"
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: v3.7
+__ansible_service_broker_registry_whitelist:
+ - '.*-apb$'
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index f73f90686..5ea73568a 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -10,6 +10,15 @@
l_use_crio: "{{ openshift_use_crio | default(False) }}"
l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+ - not l_use_crio_only
+
- name: Use Package Docker if Requested
include: package_docker.yml
when:
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 7ccab37a5..d6aee0513 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -52,14 +52,6 @@
- restart docker
when: not (os_firewall_use_firewalld | default(False)) | bool
-- name: Add enterprise registry, if necessary
- set_fact:
- l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
- when:
- - openshift.common.deployment_type == 'openshift-enterprise'
- - openshift_docker_ent_reg != ''
- - openshift_docker_ent_reg not in l2_docker_additional_registries
-
- stat: path=/etc/sysconfig/docker
register: docker_check
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index a79600930..13bbd359e 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -82,36 +82,10 @@
enabled: yes
state: restarted
-
-- block:
-
- - name: Add http_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?http_proxy[:=]{1}"
- line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
- when:
- - openshift.common.http_proxy is defined
- - openshift.common.http_proxy != ''
-
- - name: Add https_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?https_proxy[:=]{1}"
- line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
- when:
- - openshift.common.https_proxy is defined
- - openshift.common.https_proxy != ''
-
- - name: Add no_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?no_proxy[:=]{1}"
- line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
- when:
- - openshift.common.no_proxy is defined
- - openshift.common.no_proxy != ''
-
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
- block:
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 15c6a55db..726e8ada7 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -68,38 +68,10 @@
retries: 3
delay: 30
-
-# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
-# regexp: the line starts with or without #, followed by the string
-# http_proxy, then either : or =
-- block:
-
- - name: Add http_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?http_proxy[:=]{1}"
- line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
- when:
- - openshift.common.http_proxy is defined
- - openshift.common.http_proxy != ''
-
- - name: Add https_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?https_proxy[:=]{1}"
- line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
- when:
- - openshift.common.https_proxy is defined
- - openshift.common.https_proxy != ''
-
- - name: Add no_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?no_proxy[:=]{1}"
- line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
- when:
- - openshift.common.no_proxy is defined
- - openshift.common.no_proxy != ''
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
- block:
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 024479fb4..9a6951920 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -2,6 +2,11 @@
- set_fact:
l_etcd_src_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
register: pull_result
diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md
new file mode 100644
index 000000000..7b618f902
--- /dev/null
+++ b/roles/kuryr/README.md
@@ -0,0 +1,38 @@
+## OpenStack Kuryr
+
+Install the Kuryr CNI components (kuryr-controller, kuryr-cni) on master and worker
+nodes. Kuryr uses the OpenStack Networking service (Neutron) to provide networking for
+pods, which allows pods and OpenStack VMs to communicate with each other.
+
+## Requirements
+
+* Ansible 2.2+
+* CentOS/RHEL 7.3+
+
+## Current Kuryr restrictions when used with OpenShift
+
+* OpenShift Origin only
+* OpenShift on OpenStack Newton or newer (only with Trunk ports)
+
+## Key Ansible inventory Kuryr master configuration parameters
+
+* ``openshift_use_kuryr=True``
+* ``openshift_use_openshift_sdn=False``
+* ``openshift_sdn_network_plugin_name='cni'``
+* ``kuryr_cni_link_interface=eth0``
+* ``kuryr_openstack_auth_url=keystone_url``
+* ``kuryr_openstack_user_domain_name=Default``
+* ``kuryr_openstack_user_project_name=Default``
+* ``kuryr_openstack_project_id=project_uuid``
+* ``kuryr_openstack_username=kuryr``
+* ``kuryr_openstack_password=kuryr_pass``
+* ``kuryr_openstack_pod_sg_id=pod_security_group_uuid``
+* ``kuryr_openstack_pod_subnet_id=pod_subnet_uuid``
+* ``kuryr_openstack_service_subnet_id=service_subnet_uuid``
+* ``kuryr_openstack_pod_project_id=pod_project_uuid``
+* ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid``
+
+## Kuryr resources
+
+* [Kuryr documentation](https://docs.openstack.org/kuryr-kubernetes/latest/)
+* [Installing Kuryr containerized](https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html)
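
Pulling the parameters above together, a Kuryr-enabled inventory fragment might look like the following (all UUIDs and the Keystone URL are placeholders, and the variable names follow this role's defaults and templates):

```
# Illustrative [OSEv3:vars] fragment; values are placeholders
openshift_use_kuryr=True
openshift_use_openshift_sdn=False
openshift_sdn_network_plugin_name='cni'
kuryr_cni_link_interface=eth0
kuryr_openstack_auth_url=http://keystone.example.com:5000/v3
kuryr_openstack_user_domain_name=Default
kuryr_openstack_project_domain_name=Default
kuryr_openstack_project_id=<project_uuid>
kuryr_openstack_username=kuryr
kuryr_openstack_password=kuryr_pass
kuryr_openstack_pod_sg_id=<pod_security_group_uuid>
kuryr_openstack_pod_subnet_id=<pod_subnet_uuid>
kuryr_openstack_service_subnet_id=<service_subnet_uuid>
kuryr_openstack_pod_project_id=<pod_project_uuid>
kuryr_openstack_worker_nodes_subnet_id=<worker_nodes_subnet_uuid>
```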
diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml
new file mode 100644
index 000000000..ff298dda0
--- /dev/null
+++ b/roles/kuryr/defaults/main.yaml
@@ -0,0 +1,72 @@
+---
+# Kuryr conf directory
+kuryr_config_dir: /etc/kuryr
+
+# Kuryr username
+kuryr_openstack_username: kuryr
+
+# Kuryr username domain
+kuryr_openstack_user_domain_name: default
+
+# Kuryr project domain
+kuryr_openstack_project_domain_name: default
+
+# Kuryr OpenShift namespace
+kuryr_namespace: kube-system
+
+# Whether to run the cni plugin in debug mode
+kuryr_cni_debug: "false"
+
+# The version of cni binaries
+cni_version: v0.5.2
+
+# Path to bin dir (where kuryr execs get installed)
+bin_dir: /usr/bin
+
+# Path to the cni binaries
+cni_bin_dir: /opt/cni/bin
+
+# URL for cni binaries
+cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz"
+cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9"
+
+# Kuryr ClusterRole definition
+kuryr_clusterrole:
+ name: kuryrctl
+ state: present
+ rules:
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - get
+ - list
+ - watch
+ resources:
+ - daemonsets
+ - deployments
+ - deploymentconfigs
+ - endpoints
+ - ingress
+ - nodes
+ - namespaces
+ - pods
+ - projects
+ - routes
+ - services
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - update
+ - patch
+ resources:
+ - endpoints
+ - ingress
+ - pods
+ - namespaces
+ - nodes
+ - services
+ - services/status
+ - routes
diff --git a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml
new file mode 100644
index 000000000..7fd5adf41
--- /dev/null
+++ b/roles/kuryr/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Red Hat
+ description: Kuryr networking
+ company: Red Hat
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
new file mode 100644
index 000000000..55ab16f74
--- /dev/null
+++ b/roles/kuryr/tasks/master.yaml
@@ -0,0 +1,52 @@
+---
+- name: Perform OpenShift ServiceAccount config
+ include: serviceaccount.yaml
+
+- name: Create kuryr manifests tempdir
+ command: mktemp -d
+ register: manifests_tmpdir
+
+- name: Create kuryr ConfigMap manifest
+ become: yes
+ template:
+ src: configmap.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Create kuryr-controller Deployment manifest
+ become: yes
+ template:
+ src: controller-deployment.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Create kuryr-cni DaemonSet manifest
+ become: yes
+ template:
+ src: cni-daemonset.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
+
+- name: Apply ConfigMap manifest
+ oc_obj:
+ state: present
+ kind: ConfigMap
+ name: "kuryr-config"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Apply Controller Deployment manifest
+ oc_obj:
+ state: present
+ kind: Deployment
+ name: "kuryr-controller"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Apply kuryr-cni DaemonSet manifest
+ oc_obj:
+ state: present
+ kind: DaemonSet
+ name: "kuryr-cni-ds"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
new file mode 100644
index 000000000..ffe814713
--- /dev/null
+++ b/roles/kuryr/tasks/node.yaml
@@ -0,0 +1,48 @@
+---
+- name: Create CNI bin directory
+ file:
+ state: directory
+ path: "{{ cni_bin_dir }}"
+ mode: 0755
+ owner: root
+ group: root
+ recurse: yes
+
+- name: Create CNI extraction tempdir
+ command: mktemp -d
+ register: cni_tmpdir
+
+- name: Download CNI
+ get_url:
+ url: "{{ cni_bin_url }}"
+ checksum: "sha1:{{ cni_bin_checksum }}"
+ mode: 0644
+ dest: "{{ cni_tmpdir.stdout }}"
+ register: downloaded_tarball
+
+- name: Extract CNI
+ become: yes
+ unarchive:
+ remote_src: True
+ src: "{{ downloaded_tarball.dest }}"
+ dest: "{{ cni_bin_dir }}"
+ when: downloaded_tarball.changed
+
+- name: Ensure CNI net.d exists
+ file:
+ path: /etc/cni/net.d
+ recurse: yes
+ state: directory
+
+- name: Configure OpenShift node with disabled service proxy
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ regexp: '^OPTIONS="?(.*?)"?$'
+ backrefs: yes
+ backup: yes
+ line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+
+- name: force node restart to disable the proxy
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
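
For reference, the `lineinfile` edit above rewrites the node service's `OPTIONS` line in place; an illustrative before/after (assuming `openshift.common.service_type` is `origin`, so the file is `/etc/sysconfig/origin-node`) looks like:

```
# before
OPTIONS="--loglevel=2"

# after -- the \1 backreference keeps the existing options
OPTIONS="--loglevel=2 --disable dns,proxy,plugins"
```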
diff --git a/roles/kuryr/tasks/serviceaccount.yaml b/roles/kuryr/tasks/serviceaccount.yaml
new file mode 100644
index 000000000..088f13091
--- /dev/null
+++ b/roles/kuryr/tasks/serviceaccount.yaml
@@ -0,0 +1,31 @@
+---
+- name: Create Controller service account
+ oc_serviceaccount:
+ name: kuryr-controller
+ namespace: "{{ kuryr_namespace }}"
+ register: saout
+
+- name: Create a role for the Kuryr
+ oc_clusterrole: "{{ kuryr_clusterrole }}"
+
+- name: Fetch the created Kuryr controller cluster role
+ oc_clusterrole:
+ name: kuryrctl
+ state: list
+ register: crout
+
+- name: Grant Kuryr the privileged security context constraints
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+
+- name: Assign role to Kuryr service account
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: cluster-role
+ resource_name: "{{ crout.results.results.metadata.name }}"
+ state: present
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
new file mode 100644
index 000000000..39348ae90
--- /dev/null
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -0,0 +1,53 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kuryr-cni-ds
+ namespace: {{ kuryr_namespace }}
+ labels:
+ tier: node
+ app: kuryr
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: kuryr
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: kuryr-controller
+ containers:
+ - name: kuryr-cni
+ image: kuryr/cni:latest
+ imagePullPolicy: IfNotPresent
+ command: [ "cni_ds_init" ]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: bin
+ mountPath: /opt/cni/bin
+ - name: net-conf
+ mountPath: /etc/cni/net.d
+ - name: config-volume
+ mountPath: /tmp/kuryr/kuryr.conf
+ subPath: kuryr-cni.conf
+ - name: etc
+ mountPath: /etc
+ volumes:
+ - name: bin
+ hostPath:
+ path: {{ cni_bin_dir }}
+ - name: net-conf
+ hostPath:
+ path: /etc/cni/net.d
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ - name: etc
+ hostPath:
+ path: /etc
\ No newline at end of file
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
new file mode 100644
index 000000000..e874d6c25
--- /dev/null
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -0,0 +1,343 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kuryr-config
+ namespace: {{ kuryr_namespace }}
+data:
+ kuryr.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # DEPRECATED: If set to false, the logging level will be set to WARNING instead
+ # of the default INFO level. (boolean value)
+ # This option is deprecated for removal.
+ # Its value may be silently ignored in the future.
+ #verbose = true
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/kuryr-controller.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ #use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = eth0
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+ # Domain ID to scope to (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+ username = {{ kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+ kuryr-cni.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/cni.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = {{ kuryr_cni_link_interface }}
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # The token to talk to the k8s API
+ token_file = /etc/kuryr/token
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /etc/kuryr/ca.crt
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2
new file mode 100644
index 000000000..d970270b5
--- /dev/null
+++ b/roles/kuryr/templates/controller-deployment.yaml.j2
@@ -0,0 +1,40 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ namespace: {{ kuryr_namespace }}
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ spec:
+ serviceAccountName: kuryr-controller
+ automountServiceAccountToken: true
+ hostNetwork: true
+ containers:
+ - image: kuryr/controller:latest
+ imagePullPolicy: IfNotPresent
+ name: controller
+ terminationMessagePath: "/dev/termination-log"
+ # FIXME(dulek): This shouldn't be required, but without it selinux is
+ # complaining about access to kuryr.conf.
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ volumeMounts:
+ - name: config-volume
+ mountPath: "/etc/kuryr/kuryr.conf"
+ subPath: kuryr.conf
+ volumes:
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ defaultMode: 0666
+ restartPolicy: Always
diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md
new file mode 100644
index 000000000..8c10c9991
--- /dev/null
+++ b/roles/openshift_atomic/README.md
@@ -0,0 +1,28 @@
+OpenShift Atomic
+================
+
+This role houses atomic specific tasks.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+Dependencies
+------------
+
+Example Playbook
+----------------
+
+```
+- name: Ensure atomic proxies are defined
+ hosts: localhost
+ roles:
+ - role: openshift_atomic
+```
+
+License
+-------
+
+Apache License Version 2.0
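
Beyond the playbook example above, this changeset consumes the role through `include_role` with `tasks_from: proxy` (see the docker and etcd roles earlier in the diff); a minimal task-level sketch:

```
# Minimal usage sketch, mirroring how the docker and etcd roles call this role
- name: Ensure proxies are in the atomic.conf
  include_role:
    name: openshift_atomic
    tasks_from: proxy
```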
diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml
new file mode 100644
index 000000000..ea129f514
--- /dev/null
+++ b/roles/openshift_atomic/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: Atomic related tasks
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/openshift_atomic/tasks/proxy.yml
new file mode 100644
index 000000000..dde099984
--- /dev/null
+++ b/roles/openshift_atomic/tasks/proxy.yml
@@ -0,0 +1,32 @@
+---
+# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
+# regexp: the line starts with or without #, followed by the string
+# http_proxy, then either : or =
+- block:
+
+ - name: Add http_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?http_proxy[:=]{1}"
+ line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
+ when:
+ - openshift.common.http_proxy is defined
+ - openshift.common.http_proxy != ''
+
+ - name: Add https_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?https_proxy[:=]{1}"
+ line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
+ when:
+ - openshift.common.https_proxy is defined
+ - openshift.common.https_proxy != ''
+
+ - name: Add no_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?no_proxy[:=]{1}"
+ line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
+ when:
+ - openshift.common.no_proxy is defined
+ - openshift.common.no_proxy != ''
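
With the proxy facts defined, the three tasks above would leave `/etc/atomic.conf` with entries along these lines (proxy endpoints and the no_proxy list are placeholders):

```
# Illustrative /etc/atomic.conf fragment after the tasks above
http_proxy: http://proxy.example.com:3128
https_proxy: http://proxy.example.com:3128
no_proxy: .cluster.local,.svc,localhost
```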
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 9e61805f9..14d8a3325 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,9 @@
---
- set_fact:
- l_use_crio: "{{ openshift_use_crio | default(false) }}"
+ l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
+ l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool }}"
+- set_fact:
+ l_use_cli_atomic_image: "{{ l_use_crio_only or l_is_system_container_image }}"
- name: Install clients
package: name={{ openshift.common.service_type }}-clients state=present
@@ -20,23 +23,23 @@
backend: "docker"
when:
- openshift.common.is_containerized | bool
- - not l_use_crio
+ - not l_use_cli_atomic_image | bool
- block:
- name: Pull CLI Image
command: >
- atomic pull --storage ostree {{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ atomic pull --storage ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
- image: "{{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}"
+ image: "{{ '' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "atomic"
when:
- openshift.common.is_containerized | bool
- - l_use_crio
+ - l_use_cli_atomic_image | bool
- name: Reload facts to pick up installed OpenShift version
openshift_facts:
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 69eb9283d..280d7d24c 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -44,23 +44,23 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_curator_run_timezone`: The timezone that Curator uses for figuring out its run time. Defaults to 'UTC'.
- `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'.
- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
-- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.
+- `openshift_logging_curator_cpu_request`: The minimum amount of CPU to allocate to Curator. Default is '100m'.
- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
- `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the curator pod will land.
- `openshift_logging_image_pull_secret`: The name of an existing pull secret to link to the logging service accounts
- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
-- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana Proxy log level to DEBUG. Defaults to 'false'.
-- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land.
- `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect.
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
-- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
+- `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'.
- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
- `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver.
- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.
@@ -80,7 +80,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_es_client_key`: The location of the client key Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/key'.
- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'.
-- `openshift_logging_es_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
+- `openshift_logging_es_cpu_request`: The minimum amount of CPU to allocate for an ES pod cluster member. Defaults to 1 CPU.
- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'.
- `openshift_logging_es_log_appenders`: The list of rootLogger appenders for ES logs which can be: 'file', 'console'. Defaults to 'file'.
- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
@@ -107,7 +107,7 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_client_cert`: /etc/fluent/keys/cert
- `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key
- `openshift_logging_es_ops_cluster_size`: 1
-- `openshift_logging_es_ops_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
+- `openshift_logging_es_ops_cpu_request`: The minimum amount of CPU to allocate for an ES ops pod cluster member. Defaults to 1 CPU.
- `openshift_logging_es_ops_memory_limit`: 8Gi
- `openshift_logging_es_ops_pvc_dynamic`: False
- `openshift_logging_es_ops_pvc_size`: ""
@@ -115,9 +115,9 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_recover_after_time`: 5m
- `openshift_logging_es_ops_storage_group`: 65534
- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
-- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_ops_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
-- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_ops_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1.
@@ -176,7 +176,7 @@ Elasticsearch OPS too, if using an OPS cluster:
clients will use to connect to mux, and will be used in the TLS server cert
subject.
- `openshift_logging_mux_port`: 24284
-- `openshift_logging_mux_cpu_limit`: 100m
+- `openshift_logging_mux_cpu_request`: 100m
- `openshift_logging_mux_memory_limit`: 512Mi
- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
first value in the list is the namespace to use for undefined projects,
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 6e7e2557f..626732d16 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -18,20 +18,24 @@ openshift_logging_curator_run_minute: 0
openshift_logging_curator_run_timezone: UTC
openshift_logging_curator_script_log_level: INFO
openshift_logging_curator_log_level: ERROR
-openshift_logging_curator_cpu_limit: 100m
-openshift_logging_curator_memory_limit: null
+openshift_logging_curator_cpu_limit: null
+openshift_logging_curator_memory_limit: 256Mi
+openshift_logging_curator_cpu_request: 100m
openshift_logging_curator_nodeselector: {}
-openshift_logging_curator_ops_cpu_limit: 100m
-openshift_logging_curator_ops_memory_limit: null
+openshift_logging_curator_ops_cpu_limit: null
+openshift_logging_curator_ops_memory_limit: 256Mi
+openshift_logging_curator_ops_cpu_request: 100m
openshift_logging_curator_ops_nodeselector: {}
openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
+openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
openshift_logging_kibana_proxy_memory_limit: 256Mi
+openshift_logging_kibana_proxy_cpu_request: 100m
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
@@ -53,9 +57,11 @@ openshift_logging_kibana_ca: ""
openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
+openshift_logging_kibana_ops_cpu_request: 100m
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
openshift_logging_kibana_ops_proxy_memory_limit: 256Mi
+openshift_logging_kibana_ops_proxy_cpu_request: 100m
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -71,13 +77,14 @@ openshift_logging_kibana_ops_key: ""
openshift_logging_kibana_ops_ca: ""
openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
-openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_cpu_limit: null
openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_cpu_request: 100m
openshift_logging_fluentd_journal_source: ""
openshift_logging_fluentd_journal_read_from_head: ""
openshift_logging_fluentd_hosts: ['--all']
-openshift_logging_fluentd_buffer_queue_limit: 1024
-openshift_logging_fluentd_buffer_size_limit: 1m
+openshift_logging_fluentd_buffer_queue_limit: 32
+openshift_logging_fluentd_buffer_size_limit: 8m
openshift_logging_es_host: logging-es
openshift_logging_es_port: 9200
@@ -85,7 +92,8 @@ openshift_logging_es_ca: /etc/fluent/keys/ca
openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
openshift_logging_es_cluster_size: 1
-openshift_logging_es_cpu_limit: 1000m
+openshift_logging_es_cpu_limit: null
+openshift_logging_es_cpu_request: "1"
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "8Gi"
@@ -98,8 +106,6 @@ openshift_logging_es_storage_group: "65534"
openshift_logging_es_nodeselector: {}
# openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml
openshift_logging_es_config: {}
-openshift_logging_es_number_of_shards: 1
-openshift_logging_es_number_of_replicas: 0
# for exposing es to external (outside of the cluster) clients
openshift_logging_es_allow_external: False
@@ -126,8 +132,9 @@ openshift_logging_es_ops_ca: /etc/fluent/keys/ca
openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}"
-openshift_logging_es_ops_cpu_limit: 1000m
-openshift_logging_es_ops_memory_limit: "8Gi"
+openshift_logging_es_ops_cpu_limit: null
+openshift_logging_es_ops_memory_limit: 8Gi
+openshift_logging_es_ops_cpu_request: "1"
openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}"
@@ -160,8 +167,9 @@ openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_mux_port: 24284
-openshift_logging_mux_cpu_limit: 500m
-openshift_logging_mux_memory_limit: 1Gi
+openshift_logging_mux_cpu_limit: null
+openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_cpu_request: 100m
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
openshift_logging_mux_default_namespaces: ["mux-undefined"]
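[Editor's note] A minimal sketch of how the renamed request/limit variables above could be overridden from an inventory or group_vars file; the values are hypothetical and only illustrate the new split between `*_cpu_request` and `*_cpu_limit`:

    # hypothetical group_vars snippet
    openshift_logging_curator_cpu_request: 200m   # raise the curator CPU request
    openshift_logging_curator_cpu_limit: 500m     # optionally re-add a hard CPU limit
    openshift_logging_es_cpu_request: "2"         # request two cores per ES cluster member
    openshift_logging_mux_memory_limit: 1Gi       # override the new 512Mi default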
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 330e7e59a..959573635 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -17,6 +17,22 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
return dict(kind='emptydir')
+def walk(source, path, default, delimiter='.'):
+ '''Walk the source hash given the path and return the value or default if not found'''
+ if not isinstance(source, dict):
+ raise RuntimeError('The source is not a walkable dict: {} path: {}'.format(source, path))
+ keys = path.split(delimiter)
+ max_depth = len(keys)
+ cur_depth = 0
+ while cur_depth < max_depth:
+ if keys[cur_depth] in source:
+ source = source[keys[cur_depth]]
+ cur_depth = cur_depth + 1
+ else:
+ return default
+ return source
+
+
def random_word(source_alpha, length):
''' Returns a random word given the source of characters to pick from and resulting length '''
return ''.join(random.choice(source_alpha) for i in range(length))
@@ -73,5 +89,6 @@ class FilterModule(object):
'map_from_pairs': map_from_pairs,
'es_storage': es_storage,
'serviceaccount_name': serviceaccount_name,
- 'serviceaccount_namespace': serviceaccount_namespace
+ 'serviceaccount_namespace': serviceaccount_namespace,
+ 'walk': walk
}
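[Editor's note] A minimal usage sketch for the new `walk` filter (hypothetical data, mirroring how install_logging.yaml uses it further down): it descends a nested dict along a delimiter-separated path and returns the default when any key is missing:

    - set_fact:
        # resolves to 'a string value'; would resolve to '{}' if any key along the path were absent
        _es_settings: "{{ my_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') }}"
      vars:
        my_facts:
          elasticsearch:
            configmaps:
              logging-elasticsearch:
                elasticsearch.yml: "a string value"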
diff --git a/roles/openshift_logging/filter_plugins/test b/roles/openshift_logging/filter_plugins/test
new file mode 100644
index 000000000..3ad956cca
--- /dev/null
+++ b/roles/openshift_logging/filter_plugins/test
@@ -0,0 +1,34 @@
+import unittest
+from openshift_logging import walk
+
+class TestFilterMethods(unittest.TestCase):
+
+
+ def test_walk_find_key(self):
+ source = {'foo': {'bar.xyz': 'myvalue'}}
+ self.assertEquals(walk(source,'foo#bar.xyz', 123, delimiter='#'), 'myvalue')
+
+
+ def test_walk_return_default(self):
+ source = {'foo': {'bar.xyz': 'myvalue'}}
+ self.assertEquals(walk(source,'foo#bar.abc', 123, delimiter='#'), 123)
+
+
+ def test_walk_limit_max_depth(self):
+ source = {'foo': {'bar.xyz': 'myvalue'}}
+ self.assertEquals(walk(source,'foo#bar.abc#dontfindme', 123, delimiter='#'), 123)
+
+ def test_complex_hash(self):
+ source = {
+ 'elasticsearch': {
+ 'configmaps': {
+ 'logging-elasticsearch': {
+ 'elasticsearch.yml': "a string value"
+ }
+ }
+ }
+ }
+ self.assertEquals(walk(source,'elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', 123, delimiter='#'), "a string value")
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index f10df8da5..98d0d1c4f 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -207,7 +207,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
def facts_for_configmaps(self, namespace):
''' Gathers facts for configmaps in logging namespace '''
self.default_keys_for("configmaps")
- a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ a_list = self.oc_command("get", "configmaps", namespace=namespace)
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 21fd79c28..76627acf2 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -78,6 +78,7 @@
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
_es_containers: "{{item.0.containers}}"
+ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
@@ -133,6 +134,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
@@ -141,7 +143,10 @@
openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}"
openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}"
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
+ openshift_logging_es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards | default(None) }}"
+ openshift_logging_es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas | default(None) }}"
_es_containers: "{{item.0.containers}}"
+ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
@@ -167,6 +172,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
@@ -207,11 +213,13 @@
openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
+ openshift_logging_kibana_cpu_request: "{{ openshift_logging_kibana_ops_cpu_request }}"
openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ openshift_logging_kibana_proxy_cpu_request: "{{ openshift_logging_kibana_ops_proxy_cpu_request }}"
openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
@@ -243,6 +251,7 @@
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
+ openshift_logging_curator_cpu_request: "{{ openshift_logging_curator_ops_cpu_request }}"
openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
when:
diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml
index 17807b644..9cae9f936 100644
--- a/roles/openshift_logging_curator/defaults/main.yml
+++ b/roles/openshift_logging_curator/defaults/main.yml
@@ -9,8 +9,9 @@ openshift_logging_curator_namespace: logging
### Common settings
openshift_logging_curator_nodeselector: ""
-openshift_logging_curator_cpu_limit: 100m
-openshift_logging_curator_memory_limit: null
+openshift_logging_curator_cpu_limit: null
+openshift_logging_curator_cpu_request: 100m
+openshift_logging_curator_memory_limit: 256Mi
openshift_logging_curator_es_host: "logging-es"
openshift_logging_curator_es_port: 9200
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 6e8fab2b5..b4ddf45d9 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -90,6 +90,7 @@
es_host: "{{ openshift_logging_curator_es_host }}"
es_port: "{{ openshift_logging_curator_es_port }}"
curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
+ curator_cpu_request: "{{ openshift_logging_curator_cpu_request }}"
curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index e74918a40..e71393643 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -39,13 +39,26 @@ spec:
name: "curator"
image: {{image}}
imagePullPolicy: Always
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
resources:
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %}
limits:
+{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %}
cpu: "{{curator_cpu_limit}}"
-{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
requests:
+{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %}
+ cpu: "{{curator_cpu_request}}"
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
{% endif %}
env:
-
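[Editor's note] With the new curator defaults (cpu_limit null, memory_limit 256Mi, cpu_request 100m), the reworked conditionals above would render roughly the following stanza; this is only an illustration of the template logic, not output captured from the patch:

    resources:
      limits:
        memory: "256Mi"
      requests:
        cpu: "100m"
        memory: "256Mi"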
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index fc48b7f71..9fc6fd1d8 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -6,7 +6,8 @@ openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_
openshift_logging_elasticsearch_namespace: logging
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}"
-openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('1000m') }}"
+openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('') }}"
+openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_cpu_request | default('1000m') }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}"
openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}"
@@ -40,7 +41,7 @@ openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_
# config the es plugin to write kibana index based on the index mode
openshift_logging_elasticsearch_kibana_index_mode: 'unique'
-openshift_logging_elasticsearch_proxy_cpu_limit: "100m"
+openshift_logging_elasticsearch_proxy_cpu_request: "100m"
openshift_logging_elasticsearch_proxy_memory_limit: "64Mi"
openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index aeff2d198..7aabdc861 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -164,13 +164,17 @@
when: es_logging_contents is undefined
changed_when: no
+- set_fact:
+ __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}"
+ __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}"
+
- template:
src: elasticsearch.yml.j2
dest: "{{ tempdir }}/elasticsearch.yml"
vars:
allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}"
es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}"
when: es_config_contents is undefined
@@ -349,7 +353,8 @@
deploy_name: "{{ es_deploy_name }}"
image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
- es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
+ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}"
+ es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}"
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index ce3b2eb83..7966d219e 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -69,9 +69,9 @@ spec:
readOnly: true
resources:
limits:
- cpu: "{{openshift_logging_elasticsearch_proxy_cpu_limit }}"
memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
requests:
+ cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}"
memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
-
name: "elasticsearch"
@@ -79,11 +79,12 @@ spec:
imagePullPolicy: Always
resources:
limits:
- memory: "{{es_memory_limit}}"
-{% if es_cpu_limit is defined and es_cpu_limit is not none %}
+{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %}
cpu: "{{es_cpu_limit}}"
{% endif %}
+ memory: "{{es_memory_limit}}"
requests:
+ cpu: "{{es_cpu_request}}"
memory: "{{es_memory_limit}}"
{% if es_container_security_context %}
securityContext: {{ es_container_security_context | to_yaml }}
diff --git a/roles/openshift_logging_eventrouter/README.md b/roles/openshift_logging_eventrouter/README.md
index da313d68b..611bdaee0 100644
--- a/roles/openshift_logging_eventrouter/README.md
+++ b/roles/openshift_logging_eventrouter/README.md
@@ -3,9 +3,9 @@ Event router
A pod forwarding kubernetes events to EFK aggregated logging stack.
-- **eventrouter** is deployed to logging project, has a service account and its own role to read events
+- **eventrouter** is deployed to the 'default' project, has a service account and its own role to read events
- **eventrouter** watches kubernetes events, marshalls them to JSON and outputs to its sink, currently only various formatting to STDOUT
-- **fluentd** picks them up and inserts to elasticsearch *.operations* index
+- **fluentd** ingests the **eventrouter** container's output as logs (as it would for any other container) and writes them to the index appropriate for the **eventrouter**'s namespace (in the 'default' namespace, the *.operations* index is used)
- `openshift_logging_install_eventrouter`: When 'True', eventrouter will be installed. When 'False', eventrouter will be uninstalled.
@@ -15,6 +15,6 @@ Configuration variables:
- `openshift_logging_eventrouter_image_version`: The image version for the logging eventrouter. Defaults to 'latest'.
- `openshift_logging_eventrouter_sink`: Select a sink for eventrouter, supported 'stdout' and 'glog'. Defaults to 'stdout'.
- `openshift_logging_eventrouter_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
-- `openshift_logging_eventrouter_cpu_limit`: The amount of CPU to allocate to eventrouter. Defaults to '100m'.
+- `openshift_logging_eventrouter_cpu_request`: The minimum amount of CPU to allocate to eventrouter. Defaults to '100m'.
- `openshift_logging_eventrouter_memory_limit`: The memory limit for eventrouter pods. Defaults to '128Mi'.
- `openshift_logging_eventrouter_namespace`: The namespace where eventrouter is deployed. Defaults to 'default'.
diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml
index 34e33f75f..4c0350c98 100644
--- a/roles/openshift_logging_eventrouter/defaults/main.yaml
+++ b/roles/openshift_logging_eventrouter/defaults/main.yaml
@@ -4,6 +4,7 @@ openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version
openshift_logging_eventrouter_replicas: 1
openshift_logging_eventrouter_sink: stdout
openshift_logging_eventrouter_nodeselector: ""
-openshift_logging_eventrouter_cpu_limit: 100m
+openshift_logging_eventrouter_cpu_limit: null
+openshift_logging_eventrouter_cpu_request: 100m
openshift_logging_eventrouter_memory_limit: 128Mi
openshift_logging_eventrouter_namespace: default
diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
index 8df7435e2..cbbc6a8ec 100644
--- a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
@@ -45,7 +45,7 @@
params:
IMAGE: "{{openshift_logging_eventrouter_image_prefix}}logging-eventrouter:{{openshift_logging_eventrouter_image_version}}"
REPLICAS: "{{ openshift_logging_eventrouter_replicas }}"
- CPU: "{{ openshift_logging_eventrouter_cpu_limit }}"
+ CPU: "{{ openshift_logging_eventrouter_cpu_request }}"
MEMORY: "{{ openshift_logging_eventrouter_memory_limit }}"
NAMESPACE: "{{ openshift_logging_eventrouter_namespace }}"
SINK: "{{ openshift_logging_eventrouter_sink }}"
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index ea1fd3efd..7fdf959d3 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -25,7 +25,7 @@ objects:
metadata:
name: logging-eventrouter
data:
- config.json: |-
+ config.json: |-
{
"sink": "${SINK}"
}
@@ -65,9 +65,9 @@ objects:
imagePullPolicy: Always
resources:
limits:
- memory: ${MEMORY}
- cpu: ${CPU}
+ memory: ${MEMORY}
requests:
+ cpu: ${CPU}
memory: ${MEMORY}
volumeMounts:
- name: config-volume
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 25f7580a4..861935c99 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -8,7 +8,8 @@ openshift_logging_fluentd_namespace: logging
### Common settings
openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
-openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_cpu_limit: null
+openshift_logging_fluentd_cpu_request: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_hosts: ['--all']
@@ -55,7 +56,7 @@ openshift_logging_fluentd_aggregating_passphrase: none
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
-openshift_logging_fluentd_file_buffer_limit: 1Gi
+openshift_logging_fluentd_file_buffer_limit: 256Mi
# Configure fluentd to tail audit log file and filter out container engine's logs from there
# These logs are then stored in ES operation index
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 06bb35dbc..f56810610 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -172,6 +172,9 @@
ops_port: "{{ openshift_logging_fluentd_ops_port }}"
fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
+ fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request }}"
+ fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"
audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}"
audit_pos_log_file: "{{ openshift_logging_fluentd_audit_pos_file | default() }}"
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index 644b70031..b07175a50 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -32,12 +32,27 @@ spec:
imagePullPolicy: Always
securityContext:
privileged: true
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
resources:
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %}
limits:
- cpu: {{ openshift_logging_fluentd_cpu_limit }}
- memory: {{ openshift_logging_fluentd_memory_limit }}
+{% if fluentd_cpu_limit is not none %}
+ cpu: "{{fluentd_cpu_limit}}"
+{% endif %}
+{% if fluentd_memory_limit is not none %}
+ memory: "{{fluentd_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
requests:
- memory: {{ openshift_logging_fluentd_memory_limit }}
+{% if fluentd_cpu_request is not none %}
+ cpu: "{{fluentd_cpu_request}}"
+{% endif %}
+{% if fluentd_memory_limit is not none %}
+ memory: "{{fluentd_memory_limit}}"
+{% endif %}
+{% endif %}
+{% endif %}
volumeMounts:
- name: runlogjournal
mountPath: /run/log/journal
@@ -115,7 +130,7 @@ spec:
containerName: "{{ daemonset_container_name }}"
resource: limits.memory
- name: "FILE_BUFFER_LIMIT"
- value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}"
+ value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}"
{% if openshift_logging_mux_client_mode is defined and
((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
(openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index ee265bb14..1366e96cd 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -9,6 +9,7 @@ openshift_logging_kibana_namespace: logging
openshift_logging_kibana_nodeselector: ""
openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
@@ -28,6 +29,7 @@ openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix
openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_cpu_request: 100m
openshift_logging_kibana_proxy_memory_limit: 256Mi
#The absolute path on the control node to the cert file to use
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index e17e8c1f2..809f7a631 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -230,8 +230,10 @@
es_host: "{{ openshift_logging_kibana_es_host }}"
es_port: "{{ openshift_logging_kibana_es_port }}"
kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
+ kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request }}"
kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request }}"
kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index da1386d3e..329ccbde2 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -38,17 +38,26 @@ spec:
name: "kibana"
image: {{ image }}
imagePullPolicy: Always
-{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
resources:
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
limits:
-{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
+{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
cpu: "{{ kibana_cpu_limit }}"
-{% endif %}
-{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
+{% endif %}
+{% endif %}
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
requests:
+{% if kibana_cpu_request is not none and kibana_cpu_request != "" %}
+ cpu: "{{ kibana_cpu_request }}"
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
env:
- name: "ES_HOST"
@@ -76,17 +85,26 @@ spec:
name: "kibana-proxy"
image: {{ proxy_image }}
imagePullPolicy: Always
-{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
resources:
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
limits:
-{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
+{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
cpu: "{{ kibana_proxy_cpu_limit }}"
-{% endif %}
-{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
+{% endif %}
+{% endif %}
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
requests:
+{% if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %}
+ cpu: "{{ kibana_proxy_cpu_request }}"
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
ports:
-
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index 68412aec8..9de686576 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -9,10 +9,11 @@ openshift_logging_mux_namespace: logging
### Common settings
openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
-openshift_logging_mux_cpu_limit: 500m
-openshift_logging_mux_memory_limit: 2Gi
-openshift_logging_mux_buffer_queue_limit: 1024
-openshift_logging_mux_buffer_size_limit: 1m
+openshift_logging_mux_cpu_limit: null
+openshift_logging_mux_cpu_request: 100m
+openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_buffer_queue_limit: 32
+openshift_logging_mux_buffer_size_limit: 8m
openshift_logging_mux_replicas: 1
@@ -57,11 +58,11 @@ openshift_logging_mux_file_buffer_storage_type: "emptydir"
openshift_logging_mux_file_buffer_pvc_name: "logging-mux-pvc"
# required if the PVC does not already exist
-openshift_logging_mux_file_buffer_pvc_size: 4Gi
+openshift_logging_mux_file_buffer_pvc_size: 1Gi
openshift_logging_mux_file_buffer_pvc_dynamic: false
openshift_logging_mux_file_buffer_pvc_pv_selector: {}
openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce']
openshift_logging_mux_file_buffer_storage_group: '65534'
openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux"
-openshift_logging_mux_file_buffer_limit: 2Gi
+openshift_logging_mux_file_buffer_limit: 256Mi
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 2ec863afa..1b46a7ac3 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -171,6 +171,7 @@
ops_host: "{{ openshift_logging_mux_ops_host }}"
ops_port: "{{ openshift_logging_mux_ops_port }}"
mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
+ mux_cpu_request: "{{ openshift_logging_mux_cpu_request }}"
mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 4cc48139f..7e88e3964 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -37,17 +37,26 @@ spec:
- name: "mux"
image: {{image}}
imagePullPolicy: Always
-{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
resources:
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
limits:
-{% if mux_cpu_limit is not none %}
+{% if mux_cpu_limit is not none %}
cpu: "{{mux_cpu_limit}}"
-{% endif %}
-{% if mux_memory_limit is not none %}
+{% endif %}
+{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
requests:
+{% if mux_cpu_request is not none %}
+ cpu: "{{mux_cpu_request}}"
+{% endif %}
+{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
ports:
- containerPort: "{{ openshift_logging_mux_port }}"
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index b6875ebd4..3da861d03 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -46,6 +46,9 @@ r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
+r_openshift_master_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+r_openshift_master_use_kuryr: "{{ r_openshift_master_use_kuryr_default }}"
+
r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 91332acfb..843352532 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,4 +1,9 @@
---
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull master system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_master/tasks/upgrade_facts.yml b/roles/openshift_master/tasks/upgrade_facts.yml
index f6ad438aa..2252c003a 100644
--- a/roles/openshift_master/tasks/upgrade_facts.yml
+++ b/roles/openshift_master/tasks/upgrade_facts.yml
@@ -21,6 +21,10 @@
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
when: oreg_host is not defined
+- set_fact:
+ oreg_auth_credentials_replace: False
+ when: oreg_auth_credentials_replace is not defined
+
- name: Set openshift_master_debug_level
set_fact:
openshift_master_debug_level: "{{ debug_level | default(2) }}"
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 7159ccc7f..40775571f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -179,7 +179,7 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index b310a8f64..b9f16dfd4 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -104,6 +104,9 @@ openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+openshift_node_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}"
+
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 20d7a9539..164a79b39 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,4 +1,9 @@
---
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull node system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index e09063aa5..0f73ce454 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -10,6 +10,11 @@
l_service_name: "{{ openshift.docker.service_name }}"
when: not l_use_crio
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 08e1c7f4f..718d35dca 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -44,7 +44,7 @@ networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
+{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
@@ -67,9 +67,11 @@ servingInfo:
{% endfor %}
{% endif %}
volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
+{% if not (openshift_node_use_kuryr | default(False)) | bool %}
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
+{% endif %}
volumeConfig:
localQuota:
perFSGroup: {{ openshift.node.local_quota_per_fsgroup }}
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 4abe8bcaf..ef66bf9ca 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -2,9 +2,21 @@
- name: update ca trust
command: update-ca-trust
notify:
- - restart docker after updating ca trust
+ - check for container runtime after updating ca trust
-- name: restart docker after updating ca trust
+- name: check for container runtime after updating ca trust
+ command: >
+ systemctl -q is-active {{ openshift.docker.service_name }}.service
+ register: l_docker_installed
+ # An rc of 0 indicates that the container runtime service is
+ # running. We will restart it by notifying the restart handler since
+ # we have updated the system CA trust.
+ changed_when: l_docker_installed.rc == 0
+ failed_when: false
+ notify:
+ - restart container runtime after updating ca trust
+
+- name: restart container runtime after updating ca trust
systemd:
name: "{{ openshift.docker.service_name }}"
state: restarted
diff --git a/roles/openshift_node_dnsmasq/README.md b/roles/openshift_node_dnsmasq/README.md
new file mode 100644
index 000000000..4596190d7
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/README.md
@@ -0,0 +1,27 @@
+OpenShift Node DNS resolver
+===========================
+
+Configure dnsmasq to act as a DNS resolver for an OpenShift node.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | Description |
+|-----------------------------------------------------|---------------|-----------------------------------------------------------------------------------|
+| openshift_node_dnsmasq_install_network_manager_hook | true | Install the NetworkManager hook that updates /etc/resolv.conf to use the local dnsmasq instance |
+
+Dependencies
+------------
+
+* openshift_common
+* openshift_node_facts
+
+License
+-------
+
+Apache License Version 2.0
diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml
index ed97d539c..eae832fcf 100644
--- a/roles/openshift_node_dnsmasq/defaults/main.yml
+++ b/roles/openshift_node_dnsmasq/defaults/main.yml
@@ -1 +1,2 @@
---
+openshift_node_dnsmasq_install_network_manager_hook: true
diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
index dddcfc9da..e5a92a630 100644
--- a/roles/openshift_node_dnsmasq/tasks/network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
@@ -5,5 +5,6 @@
dest: /etc/NetworkManager/dispatcher.d/
mode: 0755
notify: restart NetworkManager
+ when: openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool
- meta: flush_handlers
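[Editor's note] A minimal sketch (hypothetical inventory values): because unsupported.yml below now fails when the hook is disabled, opting out of it also requires explicitly enabling unsupported configurations:

    openshift_node_dnsmasq_install_network_manager_hook: false
    openshift_enable_unsupported_configurations: true   # skips the unsupported.yml checks entirely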
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index e327ee9f5..74c1a51a8 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -54,3 +54,16 @@
- include: unsupported.yml
when:
- not openshift_enable_unsupported_configurations | default(false) | bool
+
+- name: Ensure clusterid is set along with the cloudprovider
+ fail:
+ msg: >
+ Ensure that the openshift_clusterid is set and that all infrastructure has the required tags.
+
+ For dynamic provisioning when using multiple clusters in different zones, tag each node with Key=kubernetes.io/cluster/xxxx,Value=clusterid where xxxx and clusterid are unique per cluster. In versions prior to 3.6, this was Key=KubernetesCluster,Value=clusterid.
+
+ https://github.com/openshift/openshift-docs/blob/master/install_config/persistent_storage/dynamically_provisioning_pvs.adoc#available-dynamically-provisioned-plug-ins
+ when:
+ - openshift_clusterid is not defined
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind == 'aws'
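[Editor's note] A minimal sketch of inventory settings that satisfy the new check (values hypothetical):

    openshift_cloudprovider_kind: aws
    openshift_clusterid: mycluster   # must correspond to the kubernetes.io/cluster/... tags on the cluster's AWS resources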
diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
index 39bf1780a..b70ab90a1 100644
--- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml
+++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
@@ -11,6 +11,14 @@
will not function. This also means that NetworkManager must be installed,
enabled, and responsible for management of the primary interface.
+- name: Ensure that openshift_node_dnsmasq_install_network_manager_hook is true
+ when:
+ - not openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool
+ fail:
+ msg: |-
+ The NetworkManager hook is considered a critical part of the DNS
+ infrastructure.
+
- set_fact:
__using_dynamic: True
when: