author    Jeff Cantrill <jcantril@redhat.com>  2017-01-24 17:39:07 -0500
committer Jeff Cantrill <jcantril@redhat.com>  2017-01-24 17:39:59 -0500
commit    d86bf1c52ca9e92679f6ad3ab4cc1315f067ff26 (patch)
tree      46a8f0454290a4e155d8a5cad544eed642bedb31 /roles
parent    60808d88521ada44948632825cadb7e9752c6d97 (diff)
allow openshift_logging role to specify nodeSelectors
Diffstat (limited to 'roles')
-rw-r--r-- roles/openshift_logging/README.md                         | 3
-rw-r--r-- roles/openshift_logging/tasks/install_curator.yaml        | 2
-rw-r--r-- roles/openshift_logging/tasks/install_elasticsearch.yaml  | 2
-rw-r--r-- roles/openshift_logging/tasks/install_kibana.yaml         | 2
-rw-r--r-- roles/openshift_logging/templates/curator.j2              | 6
-rw-r--r-- roles/openshift_logging/templates/es.j2                   | 6
-rw-r--r-- roles/openshift_logging/templates/kibana.j2               | 6
7 files changed, 27 insertions, 0 deletions
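
For context on how the new variables would be consumed, here is a hedged sketch of an Ansible inventory group_vars entry; the label keys and values are illustrative examples, not defaults shipped by the role:

# group_vars sketch (hypothetical); mirrors the README additions below.
openshift_logging_install_logging: true
openshift_logging_curator_nodeselector:
  node: infra
openshift_logging_kibana_nodeselector:
  node: infra
  region: west
openshift_logging_es_nodeselector:
  region: west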
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 9b71dc676..856cfa2b9 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -35,6 +35,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.
- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
+- `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the Curator pod will land.
- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
@@ -43,6 +44,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
+- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the Kibana pod will land.
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
@@ -67,6 +69,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'.
- `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'.
- `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'.
+- `openshift_logging_es_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the Elasticsearch pod will land.
When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the
same as their non-ops counterparts above, but apply to the ops cluster instance:
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
index 8f2825552..fcfce4e1e 100644
--- a/roles/openshift_logging/tasks/install_curator.yaml
+++ b/roles/openshift_logging/tasks/install_curator.yaml
@@ -31,6 +31,7 @@
curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
replicas: "{{curator_replica_count.stdout | default (0)}}"
+ curator_node_selector: "{{openshift_logging_curator_nodeselector | default({}) }}"
check_mode: no
changed_when: no
@@ -46,6 +47,7 @@
curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
replicas: "{{curator_ops_replica_count.stdout | default (0)}}"
+ curator_node_selector: "{{openshift_logging_curator_ops_nodeselector | default({}) }}"
when: openshift_logging_use_ops
check_mode: no
changed_when: no
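
Both tasks above guard the new selector with Jinja2's `default` filter, so an unset inventory variable becomes an empty map and the templates simply skip the `nodeSelector` stanza. A minimal standalone sketch of that pattern (hypothetical playbook, not part of the role):

# default_demo.yaml -- run with: ansible-playbook default_demo.yaml
- hosts: localhost
  gather_facts: no
  tasks:
    - debug:
        msg: "{{ openshift_logging_curator_nodeselector | default({}) }}"
      # Prints {} unless the variable is supplied, e.g. via -e or group_vars.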
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index fbba46a35..9b1c004f2 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -33,6 +33,7 @@
volume_names: "{{es_pvc_pool | default([])}}"
pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
deploy_name: "{{item.1}}"
+ es_node_selector: "{{openshift_logging_es_nodeselector | default({})}}"
with_indexed_items:
- "{{es_dc_pool | default([])}}"
check_mode: no
@@ -98,6 +99,7 @@
es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
+ es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({})}}"
with_indexed_items:
- "{{es_dc_pool_ops | default([])}}"
when:
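
For readers unfamiliar with the loop style above: `with_indexed_items` flattens its list one level and yields `[index, element]` pairs, so `item.0` picks a PVC by position while `item.1` is the deployment name. A hedged standalone sketch (the deployment names are made up):

# indexed_demo.yaml -- run with: ansible-playbook indexed_demo.yaml
- hosts: localhost
  gather_facts: no
  tasks:
    - debug:
        msg: "index {{ item.0 }} -> deployment {{ item.1 }}"
      with_indexed_items:
        - ["logging-es-abc", "logging-es-def"]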
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
index de4b018dd..f4df7de0c 100644
--- a/roles/openshift_logging/tasks/install_kibana.yaml
+++ b/roles/openshift_logging/tasks/install_kibana.yaml
@@ -35,6 +35,7 @@
kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
replicas: "{{kibana_replica_count.stdout | default (0)}}"
+ kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({}) }}"
check_mode: no
changed_when: no
@@ -53,6 +54,7 @@
kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
+ kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({}) }}"
when: openshift_logging_use_ops
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
index d3b5d33a2..de6258eaa 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging/templates/curator.j2
@@ -28,6 +28,12 @@ spec:
spec:
terminationGracePeriod: 600
serviceAccountName: aggregated-logging-curator
+{% if curator_node_selector is iterable and curator_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in curator_node_selector.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
containers:
-
name: "curator"
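
For illustration only: assuming `curator_node_selector` were `{"node": "infra", "region": "west"}`, the new conditional would render the pod spec roughly as follows (indentation approximated from the surrounding template):

spec:
  terminationGracePeriod: 600
  serviceAccountName: aggregated-logging-curator
  nodeSelector:
    node: infra
    region: west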
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index 291589690..ec84c6b76 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -30,6 +30,12 @@ spec:
securityContext:
supplementalGroups:
- {{openshift_logging_es_storage_group}}
+{% if es_node_selector is iterable and es_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in es_node_selector.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
containers:
-
name: "elasticsearch"
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
index 1ec97701a..b42f62850 100644
--- a/roles/openshift_logging/templates/kibana.j2
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -27,6 +27,12 @@ spec:
component: "{{component}}"
spec:
serviceAccountName: aggregated-logging-kibana
+{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in kibana_node_selector.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
containers:
-
name: "kibana"
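
The same pattern repeats for the ops cluster: each task guarded by `when: openshift_logging_use_ops` reads an `_ops_` variant of the selector. A hedged inventory sketch enabling it (label values are examples, not role defaults):

# Hypothetical ops-cluster vars.
openshift_logging_use_ops: true
openshift_logging_curator_ops_nodeselector:
  region: ops
openshift_logging_es_ops_nodeselector:
  region: ops
openshift_logging_kibana_ops_nodeselector:
  region: ops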