Diffstat (limited to 'roles')
-rw-r--r--  roles/openshift_logging/README.md | 86
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 83
-rw-r--r--  roles/openshift_logging/files/curator.yml | 18
-rw-r--r--  roles/openshift_logging/files/elasticsearch-logging.yml | 72
-rw-r--r--  roles/openshift_logging/files/elasticsearch.yml | 74
-rw-r--r--  roles/openshift_logging/files/es_migration.sh | 81
-rw-r--r--  roles/openshift_logging/files/fluent.conf | 34
-rw-r--r--  roles/openshift_logging/files/fluentd-throttle-config.yaml | 7
-rw-r--r--  roles/openshift_logging/files/generate-jks.sh | 71
-rw-r--r--  roles/openshift_logging/files/logging-deployer-sa.yaml | 6
-rw-r--r--  roles/openshift_logging/files/secure-forward.conf | 24
-rw-r--r--  roles/openshift_logging/files/server-tls.json | 5
-rw-r--r--  roles/openshift_logging/files/signing.conf | 103
-rw-r--r--  roles/openshift_logging/files/util.sh | 192
-rw-r--r--  roles/openshift_logging/filter_plugins/__init__.py | 0
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 29
-rw-r--r--  roles/openshift_logging/library/__init.py__ | 0
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 303
-rw-r--r--  roles/openshift_logging/meta/main.yaml | 3
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 93
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 168
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterrolebindings.yaml | 12
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterroles.yaml | 10
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml | 103
-rw-r--r--  roles/openshift_logging/tasks/generate_deploymentconfigs.yaml | 59
-rw-r--r--  roles/openshift_logging/tasks/generate_jks_chain.yaml | 60
-rw-r--r--  roles/openshift_logging/tasks/generate_pems.yaml | 36
-rw-r--r--  roles/openshift_logging/tasks/generate_pkcs12.yaml | 24
-rw-r--r--  roles/openshift_logging/tasks/generate_pvcs.yaml | 47
-rw-r--r--  roles/openshift_logging/tasks/generate_rolebindings.yaml | 11
-rw-r--r--  roles/openshift_logging/tasks/generate_routes.yaml | 20
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml | 73
-rw-r--r--  roles/openshift_logging/tasks/generate_serviceaccounts.yaml | 13
-rw-r--r--  roles/openshift_logging/tasks/generate_services.yaml | 81
-rw-r--r--  roles/openshift_logging/tasks/install_curator.yaml | 27
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 105
-rw-r--r--  roles/openshift_logging/tasks/install_fluentd.yaml | 38
-rw-r--r--  roles/openshift_logging/tasks/install_kibana.yaml | 33
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 49
-rw-r--r--  roles/openshift_logging/tasks/install_support.yaml | 52
-rw-r--r--  roles/openshift_logging/tasks/label_node.yaml | 27
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 35
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 54
-rw-r--r--  roles/openshift_logging/tasks/scale.yaml | 26
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml | 107
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml | 98
-rw-r--r--  roles/openshift_logging/tasks/upgrade_logging.yaml | 33
-rw-r--r--  roles/openshift_logging/templates/clusterrole.j2 | 21
-rw-r--r--  roles/openshift_logging/templates/clusterrolebinding.j2 | 24
-rw-r--r--  roles/openshift_logging/templates/curator.j2 | 97
-rw-r--r--  roles/openshift_logging/templates/es.j2 | 105
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2 | 149
-rw-r--r--  roles/openshift_logging/templates/job.j2 | 26
-rw-r--r--  roles/openshift_logging/templates/kibana.j2 | 110
-rw-r--r--  roles/openshift_logging/templates/oauth-client.j2 | 15
-rw-r--r--  roles/openshift_logging/templates/pvc.j2 | 27
-rw-r--r--  roles/openshift_logging/templates/rolebinding.j2 | 14
-rw-r--r--  roles/openshift_logging/templates/route_reencrypt.j2 | 25
-rw-r--r--  roles/openshift_logging/templates/secret.j2 | 9
-rw-r--r--  roles/openshift_logging/templates/service.j2 | 28
-rw-r--r--  roles/openshift_logging/templates/serviceaccount.j2 | 16
-rw-r--r--  roles/openshift_logging/vars/main.yaml | 40
62 files changed, 3391 insertions, 0 deletions
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
new file mode 100644
index 000000000..9836fc217
--- /dev/null
+++ b/roles/openshift_logging/README.md
@@ -0,0 +1,86 @@
+## openshift_logging Role
+
+### Please note this role is still a work in progress
+
+This role installs the Aggregated Logging stack. It should be run against
+a single host; it will create any missing certificates and API objects that the current
+[logging deployer](https://github.com/openshift/origin-aggregated-logging/tree/master/deployer) creates.
+
+As part of the installation, it is recommended that you add the Fluentd node selector label
+to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels).
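+
+For example, a minimal sketch of persisting that label through the inventory
+(the `openshift_node_labels` variable follows the advanced install docs; adjust
+the value to match `openshift_logging_fluentd_nodeselector`):
+
+```yaml
+# group_vars for the nodes that should run Fluentd
+openshift_node_labels:
+  logging-infra-fluentd: "true"
+```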
+
+### Required vars:
+
+- `openshift_logging_install_logging`: When `True` the `openshift_logging` role will install Aggregated Logging.
+- `openshift_logging_upgrade_logging`: When `True` the `openshift_logging` role will upgrade Aggregated Logging.
+
+When both `openshift_logging_install_logging` and `openshift_logging_upgrade_logging` are `False` the `openshift_logging` role will uninstall Aggregated Logging.
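+
+A minimal playbook sketch for driving the role (the host group and flag values
+are illustrative):
+
+```yaml
+- hosts: masters[0]
+  roles:
+    - role: openshift_logging
+      openshift_logging_install_logging: True
+```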
+
+### Optional vars:
+
+- `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'.
+- `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'.
+- `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'.
+- `master_url`: The URL for the Kubernetes master. This does not need to be public-facing, but it must be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.cluster.local'.
+- `public_master_url`: The public-facing URL for the Kubernetes master, used for authentication redirection. Defaults to 'https://localhost:8443'.
+- `openshift_logging_namespace`: The namespace that Aggregated Logging will be installed in. Defaults to 'logging'.
+- `openshift_logging_curator_default_days`: The default minimum age (in days) Curator uses when deleting log records. Defaults to '30'.
+- `openshift_logging_curator_run_hour`: The hour of the day at which Curator runs. Defaults to '0'.
+- `openshift_logging_curator_run_minute`: The minute of the hour at which Curator runs. Defaults to '0'.
+- `openshift_logging_curator_run_timezone`: The timezone Curator uses to determine its run time. Defaults to 'UTC'.
+- `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'.
+- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
+- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Defaults to '100m'.
+- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
+
+- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
+- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana. Unset if not specified.
+- `openshift_logging_kibana_memory_limit`: The amount of memory to allocate to Kibana. Unset if not specified.
+- `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana proxy log level to DEBUG. Defaults to 'false'.
+- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to the Kibana proxy. Unset if not specified.
+- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to the Kibana proxy. Unset if not specified.
+
+- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
+- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
+- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
+- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'.
+- `openshift_logging_fluentd_use_journal`: Whether or not Fluentd should read log entries from Journal. Defaults to 'False'. NOTE: Fluentd will attempt to detect whether or not Docker is using the journald log driver and may overwrite this value.
+- `openshift_logging_fluentd_journal_read_from_head`: Whether or not Fluentd will try to read from the head of the journal when first starting up; using this option may delay ES receiving current log records. Defaults to 'False'.
+- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].
+
+- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.
+- `openshift_logging_es_port`: The port of the ES service Fluentd should send its logs to. Defaults to '9200'.
+- `openshift_logging_es_ca`: The location of the CA certificate Fluentd uses to communicate with its openshift_logging_es_host. Defaults to '/etc/fluent/keys/ca'.
+- `openshift_logging_es_client_cert`: The location of the client certificate Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/cert'.
+- `openshift_logging_es_client_key`: The location of the client key Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/key'.
+
+- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'.
+- `openshift_logging_es_cpu_limit`: The CPU limit for the ES cluster. Unset if not specified.
+- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '1024Mi'.
+- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
+- `openshift_logging_es_pvc_dynamic`: Whether or not to add the dynamic PVC annotation for any generated PVCs. Defaults to 'False'.
+- `openshift_logging_es_pvc_size`: The requested size for the ES PVCs; when not provided, the role will not generate any PVCs. Defaults to '""'.
+- `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'.
+- `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'.
+- `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'.
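+
+As an illustration, a three-node ES cluster backed by dynamically provisioned
+100Gi PVCs could be requested with (all values are examples only):
+
+```yaml
+openshift_logging_es_cluster_size: 3
+openshift_logging_es_pvc_size: 100Gi
+openshift_logging_es_pvc_dynamic: True
+openshift_logging_es_memory_limit: 4096Mi
+```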
+
+When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the
+same as their non-ops counterparts above, but apply to the ops cluster instance:
+- `openshift_logging_es_ops_host`: logging-es-ops
+- `openshift_logging_es_ops_port`: 9200
+- `openshift_logging_es_ops_ca`: /etc/fluent/keys/ca
+- `openshift_logging_es_ops_client_cert`: /etc/fluent/keys/cert
+- `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key
+- `openshift_logging_es_ops_cluster_size`: 1
+- `openshift_logging_es_ops_cpu_limit`: The CPU limit for the ops ES cluster. Unset if not specified.
+- `openshift_logging_es_ops_memory_limit`: 1024Mi
+- `openshift_logging_es_ops_pvc_dynamic`: False
+- `openshift_logging_es_ops_pvc_size`: ""
+- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops
+- `openshift_logging_es_ops_recover_after_time`: 5m
+- `openshift_logging_es_ops_storage_group`: 65534
+- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
+- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to the ops Kibana. Unset if not specified.
+- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to the ops Kibana. Unset if not specified.
+- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to the ops Kibana proxy. Unset if not specified.
+- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to the ops Kibana proxy. Unset if not specified.
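+
+For example, enabling the ops cluster while keeping the defaults for everything
+else only requires (hostname is illustrative):
+
+```yaml
+openshift_logging_use_ops: True
+openshift_logging_kibana_ops_hostname: kibana-ops.example.com
+```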
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
new file mode 100644
index 000000000..a441f10b9
--- /dev/null
+++ b/roles/openshift_logging/defaults/main.yml
@@ -0,0 +1,83 @@
+---
+openshift_logging_image_prefix: docker.io/openshift/origin-
+openshift_logging_image_version: latest
+openshift_logging_use_ops: False
+master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+public_master_url: "https://{{openshift.common.public_hostname}}:8443"
+openshift_logging_namespace: logging
+openshift_logging_install_logging: True
+
+openshift_logging_curator_default_days: 30
+openshift_logging_curator_run_hour: 0
+openshift_logging_curator_run_minute: 0
+openshift_logging_curator_run_timezone: UTC
+openshift_logging_curator_script_log_level: INFO
+openshift_logging_curator_log_level: ERROR
+openshift_logging_curator_cpu_limit: 100m
+openshift_logging_curator_memory_limit: null
+
+openshift_logging_curator_ops_cpu_limit: 100m
+openshift_logging_curator_ops_memory_limit: null
+
+openshift_logging_kibana_hostname: "kibana.{{openshift.common.dns_domain}}"
+openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_memory_limit: null
+openshift_logging_kibana_proxy_debug: false
+openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_memory_limit: null
+
+openshift_logging_kibana_ops_hostname: "kibana-ops.{{openshift.common.dns_domain}}"
+openshift_logging_kibana_ops_cpu_limit: null
+openshift_logging_kibana_ops_memory_limit: null
+openshift_logging_kibana_ops_proxy_debug: false
+openshift_logging_kibana_ops_proxy_cpu_limit: null
+openshift_logging_kibana_ops_proxy_memory_limit: null
+
+openshift_logging_fluentd_nodeselector: '"logging-infra-fluentd": "true"'
+openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_es_copy: false
+openshift_logging_fluentd_use_journal: false
+openshift_logging_fluentd_journal_read_from_head: false
+openshift_logging_fluentd_hosts: ['--all']
+
+openshift_logging_es_host: logging-es
+openshift_logging_es_port: 9200
+openshift_logging_es_ca: /etc/fluent/keys/ca
+openshift_logging_es_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_client_key: /etc/fluent/keys/key
+openshift_logging_es_cluster_size: 1
+openshift_logging_es_cpu_limit: null
+openshift_logging_es_memory_limit: 1024Mi
+openshift_logging_es_pv_selector: null
+openshift_logging_es_pvc_dynamic: False
+openshift_logging_es_pvc_size: ""
+openshift_logging_es_pvc_prefix: logging-es
+openshift_logging_es_recover_after_time: 5m
+openshift_logging_es_storage_group: 65534
+
+# allow cluster-admin or cluster-reader to view operations index
+openshift_logging_es_ops_allow_cluster_reader: False
+
+openshift_logging_es_ops_host: logging-es-ops
+openshift_logging_es_ops_port: 9200
+openshift_logging_es_ops_ca: /etc/fluent/keys/ca
+openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_ops_client_key: /etc/fluent/keys/key
+openshift_logging_es_ops_cluster_size: 1
+openshift_logging_es_ops_cpu_limit: null
+openshift_logging_es_ops_memory_limit: 1024Mi
+openshift_logging_es_ops_pv_selector: null
+openshift_logging_es_ops_pvc_dynamic: False
+openshift_logging_es_ops_pvc_size: ""
+openshift_logging_es_ops_pvc_prefix: logging-es-ops
+openshift_logging_es_ops_recover_after_time: 5m
+openshift_logging_es_ops_storage_group: 65534
+
+# The following can be uncommented to provide values for configmaps -- take care
+# when providing file contents, as incorrect contents may prevent your cluster
+# from operating correctly.
+#es_logging_contents:
+#es_config_contents:
+#curator_config_contents:
+#fluentd_config_contents:
+#fluentd_throttle_contents:
+#fluentd_secureforward_contents:
diff --git a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging/files/curator.yml
new file mode 100644
index 000000000..8d62d8e7d
--- /dev/null
+++ b/roles/openshift_logging/files/curator.yml
@@ -0,0 +1,18 @@
+# Logging example curator config file
+
+# uncomment and use this to override the defaults from env vars
+#.defaults:
+# delete:
+# days: 30
+# runhour: 0
+# runminute: 0
+
+# to keep ops logs for a different duration:
+#.operations:
+# delete:
+# weeks: 8
+
+# example for a normal project
+#myapp:
+# delete:
+# weeks: 1
diff --git a/roles/openshift_logging/files/elasticsearch-logging.yml b/roles/openshift_logging/files/elasticsearch-logging.yml
new file mode 100644
index 000000000..377abe21f
--- /dev/null
+++ b/roles/openshift_logging/files/elasticsearch-logging.yml
@@ -0,0 +1,72 @@
+# you can override this by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console, file
+logger:
+ # log action execution errors for easier debugging
+ action: WARN
+ # reduce the logging for aws, too much is logged under the default INFO
+ com.amazonaws: WARN
+ io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
+ io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
+
+ # gateway
+ #gateway: DEBUG
+ #index.gateway: DEBUG
+
+ # peer shard recovery
+ #indices.recovery: DEBUG
+
+ # discovery
+ #discovery: TRACE
+
+ index.search.slowlog: TRACE, index_search_slow_log_file
+ index.indexing.slowlog: TRACE, index_indexing_slow_log_file
+
+ # search-guard
+ com.floragunn.searchguard: WARN
+
+additivity:
+ index.search.slowlog: false
+ index.indexing.slowlog: false
+
+appender:
+ console:
+ type: console
+ layout:
+ type: consolePattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+ # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+ #file:
+ #type: extrasRollingFile
+ #file: ${path.logs}/${cluster.name}.log
+ #rollingPolicy: timeBased
+ #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+ #layout:
+ #type: pattern
+ #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_search_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_indexing_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
diff --git a/roles/openshift_logging/files/elasticsearch.yml b/roles/openshift_logging/files/elasticsearch.yml
new file mode 100644
index 000000000..4eff30e61
--- /dev/null
+++ b/roles/openshift_logging/files/elasticsearch.yml
@@ -0,0 +1,74 @@
+cluster:
+ name: ${CLUSTER_NAME}
+
+script:
+ inline: on
+ indexed: on
+
+index:
+ number_of_shards: 1
+ number_of_replicas: 0
+ auto_expand_replicas: 0-3
+ unassigned.node_left.delayed_timeout: 2m
+ translog:
+ flush_threshold_size: 256mb
+ flush_threshold_period: 5m
+
+node:
+ master: true
+ data: true
+
+network:
+ host: 0.0.0.0
+
+cloud:
+ kubernetes:
+ service: ${SERVICE_DNS}
+ namespace: ${NAMESPACE}
+
+discovery:
+ type: kubernetes
+ zen.ping.multicast.enabled: false
+
+gateway:
+ expected_master_nodes: ${NODE_QUORUM}
+ recover_after_nodes: ${RECOVER_AFTER_NODES}
+ expected_nodes: ${RECOVER_EXPECTED_NODES}
+ recover_after_time: ${RECOVER_AFTER_TIME}
+
+io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
+
+openshift.searchguard:
+ keystore.path: /etc/elasticsearch/secret/admin.jks
+ truststore.path: /etc/elasticsearch/secret/searchguard.truststore
+
+
+path:
+ data: /elasticsearch/persistent/${CLUSTER_NAME}/data
+ logs: /elasticsearch/${CLUSTER_NAME}/logs
+ work: /elasticsearch/${CLUSTER_NAME}/work
+ scripts: /elasticsearch/${CLUSTER_NAME}/scripts
+
+searchguard:
+ authcz.admin_dn:
+ - CN=system.admin,OU=OpenShift,O=Logging
+ config_index_name: ".searchguard.${HOSTNAME}"
+ ssl:
+ transport:
+ enabled: true
+ enforce_hostname_verification: false
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/searchguard.key
+ keystore_password: kspass
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore
+ truststore_password: tspass
+ http:
+ enabled: true
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/key
+ keystore_password: kspass
+ clientauth_mode: OPTIONAL
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/truststore
+ truststore_password: tspass
diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging/files/es_migration.sh
new file mode 100644
index 000000000..cca283bae
--- /dev/null
+++ b/roles/openshift_logging/files/es_migration.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+CA=${1:-/etc/openshift/logging/ca.crt}
+KEY=${2:-/etc/openshift/logging/system.admin.key}
+CERT=${3:-/etc/openshift/logging/system.admin.crt}
+openshift_logging_es_host=${4:-logging-es}
+openshift_logging_es_port=${5:-9200}
+namespace=${6:-logging}
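+
+# Usage (all arguments are optional and shown here with their defaults):
+#   bash es_migration.sh /etc/openshift/logging/ca.crt \
+#     /etc/openshift/logging/system.admin.key \
+#     /etc/openshift/logging/system.admin.crt \
+#     logging-es 9200 logging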
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# skip indices that contain a uuid
+# get a list of unique project
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_proj_uuid_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then
+ echo "No Elasticsearch pods found running. Cannot update common data model."
+ exit 1
+fi
+
+count=$(get_list_of_indices | wc -l)
+if [ $count -eq 0 ]; then
+ echo No matching indices found - skipping update_for_uuid
+else
+ echo Creating aliases for $count index patterns . . .
+ {
+ echo '{"actions":['
+ get_list_of_indices | \
+ while IFS=. read proj ; do
+ # e.g. make test.uuid.* an alias of test.* so we can search for
+ # /test.uuid.*/_search and get both the test.uuid.* and
+ # the test.* indices
+ uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
+ [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}"
+ done
+ echo ']}'
+ } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
+fi
+
+count=$(get_list_of_proj_uuid_indices | wc -l)
+if [ $count -eq 0 ] ; then
+ echo No matching indices found - skipping update_for_common_data_model
+ exit 0
+fi
+
+echo Creating aliases for $count index patterns . . .
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+{
+ echo '{"actions":['
+ get_list_of_proj_uuid_indices | \
+ while IFS=. read proj uuid ; do
+ # e.g. make project.test.uuid.* and alias of test.uuid.* so we can search for
+ # /project.test.uuid.*/_search and get both the test.uuid.* and
+ # the project.test.uuid.* indices
+ echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}"
+ done
+ echo ']}'
+} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf
new file mode 100644
index 000000000..aa843e983
--- /dev/null
+++ b/roles/openshift_logging/files/fluent.conf
@@ -0,0 +1,34 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+
+<label @INGRESS>
+## filters
+ @include configs.d/openshift/filter-pre-*.conf
+ @include configs.d/openshift/filter-retag-journal.conf
+ @include configs.d/openshift/filter-k8s-meta.conf
+ @include configs.d/openshift/filter-kibana-transform.conf
+ @include configs.d/openshift/filter-k8s-flatten-hash.conf
+ @include configs.d/openshift/filter-k8s-record-transform.conf
+ @include configs.d/openshift/filter-syslog-record-transform.conf
+ @include configs.d/openshift/filter-post-*.conf
+##
+
+## matches
+ @include configs.d/openshift/output-pre-*.conf
+ @include configs.d/openshift/output-operations.conf
+ @include configs.d/openshift/output-applications.conf
+ # no post - applications.conf matches everything left
+##
+</label>
diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging/files/fluentd-throttle-config.yaml
new file mode 100644
index 000000000..375621ff1
--- /dev/null
+++ b/roles/openshift_logging/files/fluentd-throttle-config.yaml
@@ -0,0 +1,7 @@
+# Logging example fluentd throttling config file
+
+#example-project:
+# read_lines_limit: 10
+#
+#.operations:
+# read_lines_limit: 100
diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh
new file mode 100644
index 000000000..8760f37fe
--- /dev/null
+++ b/roles/openshift_logging/files/generate-jks.sh
@@ -0,0 +1,71 @@
+#! /bin/sh
+set -ex
+
+function importPKCS() {
+ dir=${SCRATCH_DIR:-_output}
+ NODE_NAME=$1
+ ks_pass=${KS_PASS:-kspass}
+ ts_pass=${TS_PASS:-tspass}
+ rm -rf $NODE_NAME
+
+ keytool \
+ -importkeystore \
+ -srckeystore $NODE_NAME.pkcs12 \
+ -srcstoretype PKCS12 \
+ -srcstorepass pass \
+ -deststorepass $ks_pass \
+ -destkeypass $ks_pass \
+ -destkeystore $dir/keystore.jks \
+ -alias 1 \
+ -destalias $NODE_NAME
+
+ echo "Import back to keystore (including CA chain)"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/keystore.jks \
+ -storepass $ks_pass \
+ -noprompt -alias sig-ca
+
+ echo All done for $NODE_NAME
+}
+
+function createTruststore() {
+
+ echo "Import CA to truststore for validating client certs"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/truststore.jks \
+ -storepass $ts_pass \
+ -noprompt -alias sig-ca
+}
+
+dir="/opt/deploy/"
+SCRATCH_DIR=$dir
+
+admin_user='system.admin'
+
+if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then
+ importPKCS "system.admin"
+ mv $dir/keystore.jks $dir/system.admin.jks
+fi
+
+if [[ ! -f $dir/searchguard_node_key || -z "$(keytool -list -keystore $dir/searchguard_node_key -storepass kspass | grep sig-ca)" ]]; then
+ importPKCS "elasticsearch"
+ mv $dir/keystore.jks $dir/searchguard_node_key
+fi
+
+
+if [[ ! -f $dir/keystore.jks || -z "$(keytool -list -keystore $dir/keystore.jks -storepass kspass | grep sig-ca)" ]]; then
+ importPKCS "logging-es"
+fi
+
+[ ! -f $dir/truststore.jks ] && createTruststore
+
+[ ! -f $dir/searchguard_node_truststore ] && cp $dir/truststore.jks $dir/searchguard_node_truststore
+
+# necessary so that the job knows it completed successfully
+exit 0
diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml
new file mode 100644
index 000000000..334c9402b
--- /dev/null
+++ b/roles/openshift_logging/files/logging-deployer-sa.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: logging-deployer
+secrets:
+- name: logging-deployer
diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging/files/secure-forward.conf
new file mode 100644
index 000000000..f4483df79
--- /dev/null
+++ b/roles/openshift_logging/files/secure-forward.conf
@@ -0,0 +1,24 @@
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+ # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+ # or IP
+# host server.fqdn.example.com
+# port 24284
+# </server>
+# <server>
+ # ip address to connect
+# host 203.0.113.8
+ # specify hostlabel for FQDN verification if ipaddress is used for host
+# hostlabel server.fqdn.example.com
+# </server>
diff --git a/roles/openshift_logging/files/server-tls.json b/roles/openshift_logging/files/server-tls.json
new file mode 100644
index 000000000..86deb23e3
--- /dev/null
+++ b/roles/openshift_logging/files/server-tls.json
@@ -0,0 +1,5 @@
+// For available options, see: https://nodejs.org/api/tls.html#tls_tls_createserver_options_secureconnectionlistener
+tls_options = {
+ ciphers: 'kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!RC4:!DES:!EXP:!SEED:!IDEA:+3DES',
+ honorCipherOrder: true
+}
diff --git a/roles/openshift_logging/files/signing.conf b/roles/openshift_logging/files/signing.conf
new file mode 100644
index 000000000..810a057d9
--- /dev/null
+++ b/roles/openshift_logging/files/signing.conf
@@ -0,0 +1,103 @@
+# Simple Signing CA
+
+# The [default] section contains global constants that can be referred to from
+# the entire configuration file. It may also hold settings pertaining to more
+# than one openssl command.
+
+[ default ]
+#dir = _output # Top dir
+
+# The next part of the configuration file is used by the openssl req command.
+# It defines the CA's key pair, its DN, and the desired extensions for the CA
+# certificate.
+
+[ req ]
+default_bits = 2048 # RSA key size
+encrypt_key = yes # Protect private key
+default_md = sha1 # MD to use
+utf8 = yes # Input is UTF-8
+string_mask = utf8only # Emit UTF-8 strings
+prompt = no # Don't prompt for DN
+distinguished_name = ca_dn # DN section
+req_extensions = ca_reqext # Desired extensions
+
+[ ca_dn ]
+0.domainComponent = "io"
+1.domainComponent = "openshift"
+organizationName = "OpenShift Origin"
+organizationalUnitName = "Logging Signing CA"
+commonName = "Logging Signing CA"
+
+[ ca_reqext ]
+keyUsage = critical,keyCertSign,cRLSign
+basicConstraints = critical,CA:true,pathlen:0
+subjectKeyIdentifier = hash
+
+# The remainder of the configuration file is used by the openssl ca command.
+# The CA section defines the locations of CA assets, as well as the policies
+# applying to the CA.
+
+[ ca ]
+default_ca = signing_ca # The default CA section
+
+[ signing_ca ]
+certificate = $dir/ca.crt # The CA cert
+private_key = $dir/ca.key # CA private key
+new_certs_dir = $dir/ # Certificate archive
+serial = $dir/ca.serial.txt # Serial number file
+crlnumber = $dir/ca.crl.srl # CRL number file
+database = $dir/ca.db # Index file
+unique_subject = no # Require unique subject
+default_days = 730 # How long to certify for
+default_md = sha1 # MD to use
+policy = any_pol # Default naming policy
+email_in_dn = no # Add email to cert DN
+preserve = no # Keep passed DN ordering
+name_opt = ca_default # Subject DN display options
+cert_opt = ca_default # Certificate display options
+copy_extensions = copy # Copy extensions from CSR
+x509_extensions = client_ext # Default cert extensions
+default_crl_days = 7 # How long before next CRL
+crl_extensions = crl_ext # CRL extensions
+
+# Naming policies control which parts of a DN end up in the certificate and
+# under what circumstances certification should be denied.
+
+[ match_pol ]
+domainComponent = match # Must match the CA DN
+organizationName = match # Must match the CA DN
+organizationalUnitName = optional # Included if present
+commonName = supplied # Must be present
+
+[ any_pol ]
+domainComponent = optional
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = optional
+emailAddress = optional
+
+# Certificate extensions define what types of certificates the CA is able to
+# create.
+
+[ client_ext ]
+keyUsage = critical,digitalSignature,keyEncipherment
+basicConstraints = CA:false
+extendedKeyUsage = clientAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid
+
+[ server_ext ]
+keyUsage = critical,digitalSignature,keyEncipherment
+basicConstraints = CA:false
+extendedKeyUsage = serverAuth,clientAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid
+
+# CRL extensions exist solely to point to the CA certificate that has issued
+# the CRL.
+
+[ crl_ext ]
+authorityKeyIdentifier = keyid
diff --git a/roles/openshift_logging/files/util.sh b/roles/openshift_logging/files/util.sh
new file mode 100644
index 000000000..5752a0fcd
--- /dev/null
+++ b/roles/openshift_logging/files/util.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+
+# keytool may be overridden via the environment; default to the binary on PATH.
+keytool=${keytool:-keytool}
+function generate_JKS_chain() {
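+ # Illustrative invocation: generate_JKS_chain true elasticsearch "logging-es,logging-es-ops"
+ # When ADD_OID is true, the SearchGuard node OID (1.2.3.4.5.5) is appended to the SAN list.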
+ dir=${SCRATCH_DIR:-_output}
+ ADD_OID=$1
+ NODE_NAME=$2
+ CERT_NAMES=${3:-$NODE_NAME}
+ ks_pass=${KS_PASS:-kspass}
+ ts_pass=${TS_PASS:-tspass}
+ rm -rf $NODE_NAME
+
+ extension_names=""
+ for name in ${CERT_NAMES//,/ }; do
+ extension_names="${extension_names},dns:${name}"
+ done
+
+ if [ "$ADD_OID" = true ]; then
+ extension_names="${extension_names},oid:1.2.3.4.5.5"
+ fi
+
+ echo Generating keystore and certificate for node $NODE_NAME
+
+ "$keytool" -genkey \
+ -alias $NODE_NAME \
+ -keystore $dir/keystore.jks \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -keyalg RSA \
+ -keysize 2048 \
+ -validity 712 \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+ -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+ echo Generating certificate signing request for node $NODE_NAME
+
+ "$keytool" -certreq \
+ -alias $NODE_NAME \
+ -keystore $dir/keystore.jks \
+ -storepass $ks_pass \
+ -file $dir/$NODE_NAME.csr \
+ -keyalg rsa \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+ -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+ echo Sign certificate request with CA
+
+ openssl ca \
+ -in $dir/$NODE_NAME.csr \
+ -notext \
+ -out $dir/$NODE_NAME.crt \
+ -config $dir/signing.conf \
+ -extensions v3_req \
+ -batch \
+ -extensions server_ext
+
+ echo "Import back to keystore (including CA chain)"
+
+ "$keytool" \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/keystore.jks \
+ -storepass $ks_pass \
+ -noprompt -alias sig-ca
+
+ "$keytool" \
+ -import \
+ -file $dir/$NODE_NAME.crt \
+ -keystore $dir/keystore.jks \
+ -storepass $ks_pass \
+ -noprompt \
+ -alias $NODE_NAME
+
+ echo "Import CA to truststore for validating client certs"
+
+ "$keytool" \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/truststore.jks \
+ -storepass $ts_pass \
+ -noprompt -alias sig-ca
+
+ echo All done for $NODE_NAME
+}
+
+function generate_PEM_cert() {
+ NODE_NAME="$1"
+ dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets
+
+ echo Generating certificate and key for node ${NODE_NAME}
+
+ openssl req -out "$dir/$NODE_NAME.csr" -new -newkey rsa:2048 -keyout "$dir/$NODE_NAME.key" -subj "/CN=$NODE_NAME/OU=OpenShift/O=Logging" -days 712 -nodes
+
+ echo Sign certificate request with CA
+ openssl ca \
+ -in "$dir/$NODE_NAME.csr" \
+ -notext \
+ -out "$dir/$NODE_NAME.crt" \
+ -config $dir/signing.conf \
+ -extensions v3_req \
+ -batch \
+ -extensions server_ext
+}
+
+function generate_JKS_client_cert() {
+ NODE_NAME="$1"
+ ks_pass=${KS_PASS:-kspass}
+ ts_pass=${TS_PASS:-tspass}
+ dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets
+
+ echo Generating keystore and certificate for node ${NODE_NAME}
+
+ "$keytool" -genkey \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -keyalg RSA \
+ -keysize 2048 \
+ -validity 712 \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+ echo Generating certificate signing request for node $NODE_NAME
+
+ "$keytool" -certreq \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -file $dir/$NODE_NAME.csr \
+ -keyalg rsa \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+ echo Sign certificate request with CA
+ openssl ca \
+ -in "$dir/$NODE_NAME.csr" \
+ -notext \
+ -out "$dir/$NODE_NAME.crt" \
+ -config $dir/signing.conf \
+ -extensions v3_req \
+ -batch \
+ -extensions server_ext
+
+ echo "Import back to keystore (including CA chain)"
+
+ "$keytool" \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt -alias sig-ca
+
+ "$keytool" \
+ -import \
+ -file $dir/$NODE_NAME.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt \
+ -alias $NODE_NAME
+
+ echo All done for $NODE_NAME
+}
+
+function join { local IFS="$1"; shift; echo "$*"; }
+
+function get_es_dcs() {
+ oc get dc --selector logging-infra=elasticsearch -o name
+}
+
+function get_curator_dcs() {
+ oc get dc --selector logging-infra=curator -o name
+}
+
+function extract_nodeselector() {
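+ # Example: extract_nodeselector 'region=infra,zone=default' prints
+ # nodeSelector: { "region": "infra","zone": "default" }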
+ local inputstring="${1//\"/}" # remove any errant double quotes in the inputs
+ local selectors=()
+
+ for keyvalstr in ${inputstring//\,/ }; do
+
+ keyval=( ${keyvalstr//=/ } )
+
+ if [[ -n "${keyval[0]}" && -n "${keyval[1]}" ]]; then
+ selectors+=( "\"${keyval[0]}\": \"${keyval[1]}\"")
+ else
+ echo "Could not make a node selector label from '${keyval[*]}'"
+ exit 255
+ fi
+ done
+
+ if [[ "${#selectors[*]}" -gt 0 ]]; then
+ echo nodeSelector: "{" $(join , "${selectors[@]}") "}"
+ fi
+}
diff --git a/roles/openshift_logging/filter_plugins/__init__.py b/roles/openshift_logging/filter_plugins/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_logging/filter_plugins/__init__.py
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
new file mode 100644
index 000000000..b42d5da5f
--- /dev/null
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -0,0 +1,29 @@
+import random
+
+def random_word(source_alpha,length):
+ return ''.join(random.choice(source_alpha) for i in range(length))
+
+def entry_from_named_pair(register_pairs, key):
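+ # register_pairs is expected to be the registered result of a looped task,
+ # e.g. {"results": [{"item": {"name": "ca"}, "content": "..."}, ...]}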
+ results = register_pairs.get("results")
+ if results is None:
+ raise RuntimeError("The dict argument does not have a 'results' entry. Must not have been created using 'register' in a loop")
+ for result in results:
+ item = result.get("item")
+ if item is not None:
+ name = item.get("name")
+ if name == key:
+ return result["content"]
+ raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
+
+class FilterModule(object):
+ ''' OpenShift Logging Filters '''
+
+ def filters(self):
+ return {
+ 'random_word': random_word,
+ 'entry_from_named_pair': entry_from_named_pair,
+ }
diff --git a/roles/openshift_logging/library/__init.py__ b/roles/openshift_logging/library/__init.py__
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_logging/library/__init.py__
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
new file mode 100644
index 000000000..1f0c25a84
--- /dev/null
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -0,0 +1,303 @@
+
+DOCUMENTATION = """
+---
+module: openshift_logging_facts
+version_added: ""
+short_description: Gather facts about the OpenShift logging stack
+description:
+ - Determine the current facts about the OpenShift logging stack (e.g. cluster size)
+options:
+author: Red Hat, Inc
+"""
+
+EXAMPLES = """
+- action: opneshift_logging_facts
+"""
+
+RETURN = """
+"""
+
+import copy
+import json
+import yaml
+from subprocess import Popen, PIPE
+
+default_oc_options = ["-o","json"]
+
+#constants used for various labels and selectors
+COMPONENT_KEY="component"
+LOGGING_INFRA_KEY="logging-infra"
+
+#selectors for filtering resources
+DS_FLUENTD_SELECTOR=LOGGING_INFRA_KEY + "=" + "fluentd"
+LOGGING_SELECTOR=LOGGING_INFRA_KEY + "=" + "support"
+ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift"
+COMPONENTS = ["kibana","curator","elasticsearch","fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]
+
+class OCBaseCommand(object):
+ def __init__(self, binary, kubeconfig, namespace):
+ self.binary = binary
+ self.kubeconfig = kubeconfig
+ self.user = self.getSystemAdmin(self.kubeconfig)
+ self.namespace = namespace
+
+ def getSystemAdmin(self,kubeconfig):
+ with open(kubeconfig,'r') as f:
+ config = yaml.load(f)
+ for user in config["users"]:
+ if user["name"].startswith("system:admin"):
+ return user["name"]
+ raise Exception("Unable to find system:admin in: " + kubeconfig)
+
+ def oc(self, sub, kind, namespace=None, name=None,addOptions=[]):
+ cmd = [self.binary, sub, kind]
+ if name != None:
+ cmd = cmd + [name]
+ if namespace != None:
+ cmd = cmd + ["-n", namespace]
+ cmd = cmd + ["--user="+self.user,"--config="+self.kubeconfig] + default_oc_options + addOptions
+ try:
+ process = Popen(cmd, stdout=PIPE, stderr=PIPE)
+ out, err = process.communicate()
+ if len(err) > 0:
+ if 'not found' in err:
+ return {'items':[]}
+ if 'No resources found' in err:
+ return {'items':[]}
+ raise Exception(err)
+ except Exception as e:
+ err = "There was an exception trying to run the command '"+ " ".join(cmd) +"' " + str(e)
+ raise Exception(err)
+
+ return json.loads(out)
+
+class OpenshiftLoggingFacts(OCBaseCommand):
+
+ name = "facts"
+
+ def __init__(self, logger, binary, kubeconfig, namespace):
+ super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)
+ self.logger = logger
+ self.facts = dict()
+
+ def defaultKeysFor(self, kind):
+ for comp in COMPONENTS:
+ self.addFactsFor(comp, kind)
+
+ def addFactsFor(self, comp, kind, name=None, facts=None):
+ if comp not in self.facts:
+ self.facts[comp] = dict()
+ if kind not in self.facts[comp]:
+ self.facts[comp][kind] = dict()
+ if name:
+ self.facts[comp][kind][name] = facts
+
+ def factsForRoutes(self, namespace):
+ self.defaultKeysFor("routes")
+ routeList = self.oc("get","routes", namespace=namespace, addOptions=["-l",ROUTE_SELECTOR])
+ if len(routeList["items"]) == 0:
+ return None
+ for route in routeList["items"]:
+ name = route["metadata"]["name"]
+ comp = self.comp(name)
+ if comp != None:
+ self.addFactsFor(comp, "routes", name, dict(host=route["spec"]["host"]))
+ self.facts["agl_namespace"] = namespace
+
+
+ def factsForDaemonsets(self, namespace):
+ self.defaultKeysFor("daemonsets")
+ dsList = self.oc("get", "daemonsets", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY+"=fluentd"])
+ if len(dsList["items"]) == 0:
+ return
+ for ds in dsList["items"]:
+ name = ds["metadata"]["name"]
+ comp = self.comp(name)
+ spec = ds["spec"]["template"]["spec"]
+ container = spec["containers"][0]
+ result = dict(
+ selector = ds["spec"]["selector"],
+ image = container["image"],
+ resources = container["resources"],
+ nodeSelector = spec["nodeSelector"],
+ serviceAccount = spec["serviceAccount"],
+ terminationGracePeriodSeconds = spec["terminationGracePeriodSeconds"]
+ )
+ self.addFactsFor(comp, "daemonsets", name, result)
+
+ def factsForPvcs(self, namespace):
+ self.defaultKeysFor("pvcs")
+ pvclist = self.oc("get", "pvc", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY])
+ if len(pvclist["items"]) == 0:
+ return
+ for pvc in pvclist["items"]:
+ name = pvc["metadata"]["name"]
+ comp = self.comp(name)
+ self.addFactsFor(comp,"pvcs",name,dict())
+
+ def factsForDeploymentConfigs(self, namespace):
+ self.defaultKeysFor("deploymentconfigs")
+ dclist = self.oc("get", "deploymentconfigs", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY])
+ if len(dclist["items"]) == 0:
+ return
+ dcs = dclist["items"]
+ for dc in dcs:
+ name = dc["metadata"]["name"]
+ comp = self.comp(name)
+ if comp != None:
+ spec = dc["spec"]["template"]["spec"]
+ facts = dict(
+ selector = dc["spec"]["selector"],
+ replicas = dc["spec"]["replicas"],
+ serviceAccount = spec["serviceAccount"],
+ containers = dict(),
+ volumes = dict()
+ )
+ if spec.has_key("volumes"):
+ for vol in spec["volumes"]:
+ clone = copy.deepcopy(vol)
+ clone.pop("name", None)
+ facts["volumes"][vol["name"]] = clone
+ for container in spec["containers"]:
+ facts["containers"][container["name"]] = dict(
+ image = container["image"],
+ resources = container["resources"],
+ )
+ self.addFactsFor(comp,"deploymentconfigs",name,facts)
+
+ def factsForServices(self, namespace):
+ self.defaultKeysFor("services")
+ servicelist = self.oc("get", "services", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR])
+ if len(servicelist["items"]) == 0:
+ return
+ for service in servicelist["items"]:
+ name = service["metadata"]["name"]
+ comp = self.comp(name)
+ if comp != None:
+ self.addFactsFor(comp, "services", name, dict())
+
+ def factsForConfigMaps(self, namespace):
+ self.defaultKeysFor("configmaps")
+ aList = self.oc("get", "configmaps", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR])
+ if len(aList["items"]) == 0:
+ return
+ for item in aList["items"]:
+ name = item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp != None:
+ self.addFactsFor(comp, "configmaps", name, item["data"])
+
+ def factsForOAuthClients(self, namespace):
+ self.defaultKeysFor("oauthclients")
+ aList = self.oc("get", "oauthclients", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR])
+ if len(aList["items"]) == 0:
+ return
+ for item in aList["items"]:
+ name = item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp != None:
+ result = dict(
+ redirectURIs = item["redirectURIs"]
+ )
+ self.addFactsFor(comp, "oauthclients", name, result)
+
+ def factsForSecrets(self, namespace):
+ self.defaultKeysFor("secrets")
+ aList = self.oc("get", "secrets", namespace=namespace)
+ if len(aList["items"]) == 0:
+ return
+ for item in aList["items"]:
+ name = item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp != None and item["type"] == "Opaque":
+ result = dict(
+ keys = item["data"].keys()
+ )
+ self.addFactsFor(comp, "secrets", name, result)
+
+ def factsForSCCs(self, namespace):
+ self.defaultKeysFor("sccs")
+ scc = self.oc("get", "scc", name="privileged")
+ if len(scc["users"]) == 0:
+ return
+ for item in scc["users"]:
+ comp = self.comp(item)
+ if comp != None:
+ self.addFactsFor(comp, "sccs", "privileged", dict())
+
+ def factsForClusterRoleBindings(self, namespace):
+ self.defaultKeysFor("clusterrolebindings")
+ role = self.oc("get", "clusterrolebindings", name="cluster-readers")
+ if "subjects" not in role or len(role["subjects"]) == 0:
+ return
+ for item in role["subjects"]:
+ comp = self.comp(item["name"])
+ if comp != None and namespace == item["namespace"]:
+ self.addFactsFor(comp, "clusterrolebindings", "cluster-readers", dict())
+
+# this needs to end up nested under the service account...
+ def factsForRoleBindings(self, namespace):
+ self.defaultKeysFor("rolebindings")
+ role = self.oc("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role")
+ if "subjects" not in role or len(role["subjects"]) == 0:
+ return
+ for item in role["subjects"]:
+ comp = self.comp(item["name"])
+ if comp != None and namespace == item["namespace"]:
+ self.addFactsFor(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
+
+ def comp(self, name):
+ if name.startswith("logging-curator-ops"):
+ return "curator_ops"
+ elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"):
+ return "kibana_ops"
+ elif name.startswith("logging-es-ops") or name.startswith("logging-elasticsearch-ops"):
+ return "elasticsearch_ops"
+ elif name.startswith("logging-curator"):
+ return "curator"
+ elif name.startswith("logging-kibana") or name.startswith("kibana"):
+ return "kibana"
+ elif name.startswith("logging-es") or name.startswith("logging-elasticsearch"):
+ return "elasticsearch"
+ elif name.startswith("logging-fluentd") or name.endswith("aggregated-logging-fluentd"):
+ return "fluentd"
+ else:
+ return None
+
+ def do(self):
+ self.factsForRoutes(self.namespace)
+ self.factsForDaemonsets(self.namespace)
+ self.factsForDeploymentConfigs(self.namespace)
+ self.factsForServices(self.namespace)
+ self.factsForConfigMaps(self.namespace)
+ self.factsForSCCs(self.namespace)
+ self.factsForOAuthClients(self.namespace)
+ self.factsForClusterRoleBindings(self.namespace)
+ self.factsForRoleBindings(self.namespace)
+ self.factsForSecrets(self.namespace)
+ self.factsForPvcs(self.namespace)
+
+ return self.facts
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ admin_kubeconfig = {"required": True, "type": "str"},
+ oc_bin = {"required": True, "type": "str"},
+ openshift_logging_namespace = {"required": True, "type": "str"}
+ ),
+ supports_check_mode = False
+ )
+ try:
+ cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],module.params['openshift_logging_namespace'])
+ module.exit_json(
+ ansible_facts = {"openshift_logging_facts": cmd.do() }
+ )
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
new file mode 100644
index 000000000..8bff6cfb7
--- /dev/null
+++ b/roles/openshift_logging/meta/main.yaml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - { role: openshift_facts }
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
new file mode 100644
index 000000000..6e8fc29d0
--- /dev/null
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -0,0 +1,93 @@
+---
+- name: stop logging
+ include: stop_cluster.yaml
+
+# delete the deployment objects that we had created
+- name: delete logging api objects
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - dc
+ - rc
+ - svc
+ - routes
+ - templates
+ - daemonset
+
+# delete the oauthclient
+- name: delete oauthclient kibana-proxy
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true
+
+# delete any image streams that we may have created
+- name: delete logging image streams
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true
+
+# delete our old secrets
+- name: delete logging secrets
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-fluentd
+ - logging-elasticsearch
+ - logging-kibana
+ - logging-kibana-proxy
+ - logging-curator
+ ignore_errors: yes
+
+# delete role bindings
+- name: delete rolebindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-elasticsearch-view-role
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - rolebinding-reader
+
+# delete cluster roles
+- name: delete cluster roles
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - rolebinding-reader
+
+# delete our service accounts
+- name: delete service accounts
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete serviceaccount {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - aggregated-logging-elasticsearch
+ - aggregated-logging-kibana
+ - aggregated-logging-curator
+ - aggregated-logging-fluentd
+
+# delete our roles
+- name: delete roles
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - daemonset-admin
+
+# delete our configmaps
+- name: delete configmaps
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-curator
+ - logging-elasticsearch
+ - logging-fluentd
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
new file mode 100644
index 000000000..161d51055
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -0,0 +1,168 @@
+---
+# we will ensure our secrets and configmaps are set up here first
+- name: Checking for ca.key
+ stat: path="{{generated_certs_dir}}/ca.key"
+ register: ca_key_file
+ check_mode: no
+
+- name: Checking for ca.crt
+ stat: path="{{generated_certs_dir}}/ca.crt"
+ register: ca_cert_file
+ check_mode: no
+
+- name: Checking for ca.serial.txt
+ stat: path="{{generated_certs_dir}}/ca.serial.txt"
+ register: ca_serial_file
+ check_mode: no
+
+- name: Generate certificates
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
+ --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
+ check_mode: no
+ when:
+ - not ca_key_file.stat.exists
+ - not ca_cert_file.stat.exists
+ - not ca_serial_file.stat.exists
+
+- name: Checking for signing.conf
+ stat: path="{{generated_certs_dir}}/signing.conf"
+ register: signing_conf_file
+ check_mode: no
+
+- block:
+ - copy: src=signing.conf dest={{generated_certs_dir}}/signing.conf
+ check_mode: no
+
+ - lineinfile: "dest={{generated_certs_dir}}/signing.conf regexp='# Top dir$' line='dir = {{generated_certs_dir}} # Top dir'"
+ check_mode: no
+ when:
+ - not signing_conf_file.stat.exists
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: kibana
+ - procure_component: kibana-ops
+ - procure_component: kibana-internal
+ hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+
+# - include: procure_server_certs.yaml
+# vars:
+# - procure_component: kibana
+
+# - include: procure_server_certs.yaml
+# vars:
+# - procure_component: kibana-ops
+
+# - include: procure_server_certs.yaml
+# vars:
+# - procure_component: kibana-internal
+# - hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+
+- name: Copy proxy TLS configuration file
+ copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
+ when: server_tls_json is undefined
+ check_mode: no
+
+- name: Copy proxy TLS configuration file
+ copy: content="{{server_tls_json}}" dest={{generated_certs_dir}}/server-tls.json
+ when: server_tls_json is defined
+ check_mode: no
+
+- name: Checking for ca.db
+ stat: path="{{generated_certs_dir}}/ca.db"
+ register: ca_db_file
+ check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.db
+ check_mode: no
+ when:
+ - not ca_db_file.stat.exists
+
+- name: Checking for ca.crt.srl
+ stat: path="{{generated_certs_dir}}/ca.crt.srl"
+ register: ca_cert_srl_file
+ check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.crt.srl
+ check_mode: no
+ when:
+ - not ca_cert_srl_file.stat.exists
+
+- name: Generate PEM certs
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.fluentd
+ - system.logging.kibana
+ - system.logging.curator
+ - system.admin
+ loop_control:
+ loop_var: node_name
+
+- shell: certs=""; for cert in $(echo logging-es{,-ops}); do certs=$certs,dns:$cert; done; echo $certs
+ register: elasticsearch_certs
+ check_mode: no
+
+- shell: certs=""; for cert in $(echo logging-es{,-ops}{,-cluster}{,.logging.svc.cluster.local}); do certs=$certs,dns:$cert; done; echo $certs
+ register: logging_es_certs
+ check_mode: no
+
+#- shell: index=2; certs=""; for cert in $(echo logging-es{,-ops}); do certs=$certs,DNS.$index=$cert; index=$(($index+1)); done; echo $certs
+# register: elasticsearch_certs
+# check_mode: no
+
+#- shell: index=2; certs=""; for cert in $(echo logging-es{,-ops}{,-cluster}{,.logging.svc.cluster.local}); do certs=$certs,DNS.$index=$cert; index=$(($index+1)); done; echo $certs
+# register: logging_es_certs
+# check_mode: no
+
+- name: Generate JKS chain for system.admin
+# include: generate_pkcs12.yaml component='system.admin'
+ include: generate_jks_chain.yaml component='system.admin'
+
+- name: Generate JKS chains
+# include: generate_pkcs12.yaml component={{node.name}} oid={{node.oid | default(False)}} chain_certs={{node.certs}}
+ include: generate_jks_chain.yaml component={{node.name}} oid={{node.oid | default(False)}} chain_certs={{node.certs}}
+ with_items:
+ - {name: 'elasticsearch', oid: True, certs: '{{elasticsearch_certs.stdout}}'}
+ - {name: 'logging-es', certs: '{{logging_es_certs.stdout}}'}
+ loop_control:
+ loop_var: node
+# This should be handled within the ES image instead... ---
+#- name: Copy jks script
+# copy:
+# src: generate-jks.sh
+# dest: "{{etcd_generated_certs_dir}}/logging"
+
+#- name: Generate JKS chains
+# template:
+# src: job.j2
+# dest: "{{mktemp.stdout}}/jks_job.yaml"
+
+#- name: kick off job
+# shell: >
+# {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_job.yaml -n {{logging_namespace}}
+# register: podoutput
+
+#- shell: >
+# echo {{podoutput.stdout}} | awk -v podname='\\\".*\\\"' '{print $2}'
+# register: podname
+
+#- action: shell >
+# {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig oc get pod/{{podname.stdout}} -o go-template='{{ '{{' }}index .status "phase"{{ '}}' }}' -n {{logging_namespace}}
+# register: result
+# until: result.stdout.find("Succeeded") != -1
+# retries: 5
+# delay: 10
+# --- This should be handled within the ES image instead...
+- name: Generate proxy session
+ shell: tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 200
+ register: session_secret
+ check_mode: no
+
+- name: Generate oauth client secret
+ shell: tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 64
+ register: oauth_secret
+ check_mode: no
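
The CA bootstrap above creates ca.key, ca.crt, and ca.serial.txt only when all three are absent, so re-runs reuse the existing signer rather than re-issuing it. A minimal sketch of a verification step that could follow the PEM generation, assuming generated_certs_dir as above (the task itself is illustrative, not part of this role):

- name: Verify leaf certs chain back to the signer CA (illustrative)
  command: >
    openssl verify -CAfile {{ generated_certs_dir }}/ca.crt
    {{ generated_certs_dir }}/{{ cert_name }}.crt
  with_items:
    - system.logging.fluentd
    - system.logging.kibana
    - system.logging.curator
    - system.admin
  loop_control:
    loop_var: cert_name
  changed_when: false
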
diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
new file mode 100644
index 000000000..ffd5f1e00
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
@@ -0,0 +1,12 @@
+---
+- name: Generate ClusterRoleBindings
+ template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml
+ vars:
+ acct_name: aggregated-logging-elasticsearch
+ obj_name: rolebinding-reader
+ crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"]
+ subjects:
+ - kind: ServiceAccount
+ name: "{{acct_name}}"
+ namespace: "{{openshift_logging_namespace}}"
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml
new file mode 100644
index 000000000..8b0ef377a
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterroles.yaml
@@ -0,0 +1,10 @@
+---
+- name: Generate ClusterRole for cluster-reader
+ template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml
+ vars:
+ obj_name: rolebinding-reader
+ rules:
+ - resources: [clusterrolebindings]
+ verbs:
+ - get
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
new file mode 100644
index 000000000..86882a5da
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -0,0 +1,103 @@
+---
+- block:
+ - copy:
+ src: elasticsearch-logging.yml
+ dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+ when: es_logging_contents is undefined
+
+ - copy:
+ src: elasticsearch.yml
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ when: es_config_contents is undefined
+
+ - lineinfile:
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ regexp: '^openshift\.operations\.allow_cluster_reader(.)*$'
+ line: "\nopenshift.operations.allow_cluster_reader: {{openshift_logging_es_ops_allow_cluster_reader | lower}}"
+ when: es_config_contents is undefined
+
+ - copy:
+ content: "{{es_logging_contents}}"
+ dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+ when: es_logging_contents is defined
+
+ - copy:
+ content: "{{es_config_contents}}"
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ when: es_config_contents is defined
+
+ - shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch
+ --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run
+ register: es_configmap
+
+ - copy:
+ content: "{{es_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml"
+ when: es_configmap.stdout is defined
+ check_mode: no
+
+- block:
+ - copy:
+ src: curator.yml
+ dest: "{{mktemp.stdout}}/curator.yml"
+ when: curator_config_contents is undefined
+
+ - copy:
+ content: "{{curator_config_contents}}"
+ dest: "{{mktemp.stdout}}/curator.yml"
+ when: curator_config_contents is defined
+
+ - shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator
+ --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run
+ register: curator_configmap
+
+ - copy:
+ content: "{{curator_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml"
+ when: curator_configmap.stdout is defined
+ check_mode: no
+
+- block:
+ - copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent.conf"
+ when: fluentd_config_contents is undefined
+
+ - copy:
+ src: fluentd-throttle-config.yaml
+ dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is undefined
+
+ - copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward.conf"
+ when: fluentd_secureforward_contents is undefined
+
+ - copy:
+ content: "{{fluentd_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent.conf"
+ when: fluentd_config_contents is defined
+
+ - copy:
+ content: "{{fluentd_throttle_contents}}"
+ dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is defined
+
+ - copy:
+ content: "{{fluentd_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward.conf"
+ when: fluentd_secureforward_contents is defined
+
+ - shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd
+ --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml
+ --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run
+ register: fluentd_configmap
+
+ - copy:
+ content: "{{fluentd_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml"
+ when: fluentd_configmap.stdout is defined
+ check_mode: no
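
Each block above uses the same pattern: stage the config files in the temp dir, render a ConfigMap manifest with `oc create configmap ... -o yaml --dry-run` (which prints the object without creating it), and write the captured stdout under templates/ for the later apply in install_logging.yaml. Reduced to its core, with hypothetical names logging-example and example.conf:

- shell: >
    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
    create configmap logging-example
    --from-file=example.conf={{ mktemp.stdout }}/example.conf -o yaml --dry-run
  register: example_configmap

- copy:
    content: "{{ example_configmap.stdout }}"
    dest: "{{ mktemp.stdout }}/templates/logging-example-configmap.yaml"
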
diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
new file mode 100644
index 000000000..151cafd9d
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
@@ -0,0 +1,59 @@
+---
+- name: Generate kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-dc.yaml
+ vars:
+ component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ check_mode: no
+
+- name: Generate OPS kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml
+ vars:
+ component: kibana-ops
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ check_mode: no
+
+- name: Generate elasticsearch deploymentconfig
+ template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml
+ vars:
+ component: es
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-abc123"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ check_mode: no
+
+- name: Generate OPS elasticsearch deploymentconfig
+ template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml
+ vars:
+ component: es-ops
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-abc123"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ check_mode: no
+
+- name: Generate curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml
+ vars:
+ component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ check_mode: no
+
+- name: Generate OPS curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml
+ vars:
+ component: curator-ops
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ openshift_logging_es_host: logging-es-ops
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_jks_chain.yaml b/roles/openshift_logging/tasks/generate_jks_chain.yaml
new file mode 100644
index 000000000..14ffdc51f
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_jks_chain.yaml
@@ -0,0 +1,60 @@
+---
+- debug: msg="certs are {{chain_certs}} and oid is {{oid}}"
+ when: chain_certs is defined and oid is defined
+
+- debug: msg="certs are {{chain_certs}}"
+ when: chain_certs is defined and oid is undefined
+
+- name: Build extensions with certs
+ shell: echo "{{chain_certs}}{{ (oid) | ternary(',oid:1.2.3.4.5.5','') }}"
+ register: cert_ext
+ when: chain_certs is defined and oid is defined
+ check_mode: no
+
+- debug: msg="extensions are {{cert_ext.stdout}}"
+ when: cert_ext.stdout is defined
+
+- shell: >
+ echo {{ (cert_ext.stdout is defined) | ternary( '-ext san=dns:localhost,ip:127.0.0.1','') }}{{ (cert_ext.stdout is defined) | ternary( cert_ext.stdout, '') }}
+ register: extensions
+ check_mode: no
+
+- name: Checking for {{component}}.jks ...
+ stat: path="{{generated_certs_dir}}/{{component}}.jks"
+ register: jks_file
+ check_mode: no
+
+- name: Checking for truststore...
+ stat: path="{{generated_certs_dir}}/truststore.jks"
+ register: jks_truststore
+ check_mode: no
+
+- block:
+ - shell: >
+ keytool -genkey -alias {{component}} -keystore {{generated_certs_dir}}/{{component}}.jks -keypass kspass -storepass kspass
+ -keyalg RSA -keysize 2048 -validity 712 -dname "CN={{component}}, OU=OpenShift, O=Logging" {{extensions.stdout}}
+
+ - shell: >
+ keytool -certreq -alias {{component}} -keystore {{generated_certs_dir}}/{{component}}.jks -storepass kspass
+ -file {{generated_certs_dir}}/{{component}}-jks.csr -keyalg RSA -dname "CN={{component}}, OU=OpenShift, O=Logging" {{extensions.stdout}}
+
+ - shell: >
+ openssl ca -in {{generated_certs_dir}}/{{component}}-jks.csr -notext -out {{generated_certs_dir}}/{{component}}-jks.crt
+ -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext
+
+ - shell: >
+ keytool -import -file {{generated_certs_dir}}/ca.crt -keystore {{generated_certs_dir}}/{{component}}.jks
+ -storepass kspass -noprompt -alias sig-ca
+
+ - shell: >
+ keytool -import -file {{generated_certs_dir}}/{{component}}-jks.crt -keystore {{generated_certs_dir}}/{{component}}.jks
+ -storepass kspass -noprompt -alias {{component}}
+
+ when: not jks_file.stat.exists
+ check_mode: no
+
+- block:
+ - shell: >
+ keytool -import -file {{generated_certs_dir}}/ca.crt -keystore {{generated_certs_dir}}/truststore.jks -storepass tspass -noprompt -alias sig-ca
+ when: not jks_truststore.stat.exists
+ check_mode: no
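
The keystore block above runs five steps per component: generate a keypair, export a CSR, sign the CSR with the role's OpenSSL CA via signing.conf, then import the CA cert and the signed cert back into the keystore to complete the chain. One way to confirm the result, sketched as an illustrative pair of tasks (kspass matches the -storepass used above):

- name: List keystore entries (illustrative)
  command: keytool -list -keystore {{ generated_certs_dir }}/{{ component }}.jks -storepass kspass
  register: jks_entries
  changed_when: false

- debug: var=jks_entries.stdout_lines
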
diff --git a/roles/openshift_logging/tasks/generate_pems.yaml b/roles/openshift_logging/tasks/generate_pems.yaml
new file mode 100644
index 000000000..289b72ea6
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pems.yaml
@@ -0,0 +1,36 @@
+---
+- name: Checking for {{component}}.key
+ stat: path="{{generated_certs_dir}}/{{component}}.key"
+ register: key_file
+ check_mode: no
+
+- name: Checking for {{component}}.crt
+ stat: path="{{generated_certs_dir}}/{{component}}.crt"
+ register: cert_file
+ check_mode: no
+
+- name: Creating cert req for {{component}}
+ command: >
+ openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+ -subj "/CN={{component}}/OU=OpenShift/O=Logging/subjectAltName=DNS.1=localhost{{cert_ext.stdout}}" -days 712 -nodes
+ when:
+ - not key_file.stat.exists
+ - cert_ext.stdout is defined
+ check_mode: no
+
+- name: Creating cert req for {{component}}
+ command: >
+ openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+ -subj "/CN={{component}}/OU=OpenShift/O=Logging" -days 712 -nodes
+ when:
+ - not key_file.stat.exists
+ - cert_ext.stdout is undefined
+ check_mode: no
+
+- name: Sign cert request with CA for {{component}}
+ command: >
+ openssl ca -in {{generated_certs_dir}}/{{component}}.csr -notext -out {{generated_certs_dir}}/{{component}}.crt
+ -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext
+ when:
+ - not cert_file.stat.exists
+ check_mode: no
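
generate_pems.yaml mirrors that flow for PEM consumers: create a key and CSR (embedding a subjectAltName in the subject when cert_ext is set), then sign it against the same signing.conf CA. A quick sanity check of an issued cert, as an illustrative task:

- name: Show subject and issuer of a signed cert (illustrative)
  command: openssl x509 -in {{ generated_certs_dir }}/{{ component }}.crt -noout -subject -issuer
  changed_when: false
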
diff --git a/roles/openshift_logging/tasks/generate_pkcs12.yaml b/roles/openshift_logging/tasks/generate_pkcs12.yaml
new file mode 100644
index 000000000..dde65746f
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pkcs12.yaml
@@ -0,0 +1,24 @@
+---
+- debug: msg="certs are {{chain_certs}} and oid is {{oid}}"
+ when: chain_certs is defined and oid is defined
+
+- debug: msg="certs are {{chain_certs}}"
+ when: chain_certs is defined and oid is undefined
+
+- name: Build extensions with certs
+ shell: echo "{{chain_certs}}{{ (oid) | ternary(',oid=1.2.3.4.5.5','') }}"
+ register: cert_ext
+ when: chain_certs is defined and oid is defined
+
+- debug: msg="extensions are {{cert_ext.stdout}}"
+ when: cert_ext.stdout is defined
+
+- include: generate_pems.yaml
+
+- local_action: stat path="{{mktemp.stdout}}/{{component}}.pkcs12"
+ register: pkcs_file
+ become: no
+
+- name: Generating pkcs12 chain for {{component}}
+ command: openssl pkcs12 -export -out {{generated_certs_dir}}/{{component}}.pkcs12 -inkey {{generated_certs_dir}}/{{component}}.key -in {{generated_certs_dir}}/{{component}}.crt -password pass:pass
+ when: not pkcs_file.stat.exists
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
new file mode 100644
index 000000000..ee4416bbd
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pvcs.yaml
@@ -0,0 +1,47 @@
+---
+- name: Init pool of PersistentVolumeClaim names
+ set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}}
+ vars:
+ pvc_name: "{{openshift_logging_es_pvc_prefix}}-{{item| int}}"
+ start: "{{es_pvc_names | map('regex_search',openshift_logging_es_pvc_prefix+'.*')|select('string')|list|length}}"
+ with_sequence: start={{start}} end={{ (start|int > openshift_logging_es_cluster_size - 1) | ternary(start, openshift_logging_es_cluster_size - 1)}}
+ when:
+ - openshift_logging_es_pvc_size | search('^\d.*')
+ - "{{ es_dc_names|default([]) | length < openshift_logging_es_cluster_size }}"
+ check_mode: no
+
+- name: Generating PersistentVolumeClaims
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{claim_name}}"
+ size: "{{openshift_logging_es_pvc_size}}"
+ access_modes:
+ - ReadWriteOnce
+ pv_selector: "{{openshift_logging_es_pv_selector}}"
+ with_items:
+ - "{{es_pvc_pool | default([])}}"
+ loop_control:
+ loop_var: claim_name
+ when:
+ - not openshift_logging_es_pvc_dynamic
+ - es_pvc_pool is defined
+ check_mode: no
+
+- name: Generating PersistentVolumeClaims - Dynamic
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{claim_name}}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ size: "{{openshift_logging_es_pvc_size}}"
+ access_modes:
+ - ReadWriteOnce
+ pv_selector: "{{openshift_logging_es_pv_selector}}"
+ with_items:
+ - "{{es_pvc_pool|default([])}}"
+ loop_control:
+ loop_var: claim_name
+ when:
+ - openshift_logging_es_pvc_dynamic
+ - es_pvc_pool is defined
+ check_mode: no
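
The pool logic numbers claims from the count of existing PVCs matching the prefix up to cluster size minus one, so a cluster size of 3 with one existing claim generates prefix-1 and prefix-2. For reference, the shape of manifest pvc.j2 renders for each pool entry, assuming a logging-es prefix and a hypothetical 10Gi size (annotations and selector omitted):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: logging-es-1
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
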
diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml
new file mode 100644
index 000000000..02f81368d
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_rolebindings.yaml
@@ -0,0 +1,11 @@
+---
+- name: Generate RoleBindings
+ template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml
+ vars:
+ obj_name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
new file mode 100644
index 000000000..d280ac04c
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -0,0 +1,20 @@
+---
+- name: Generating logging routes
+ template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml
+ tags: routes
+ vars:
+ obj_name: "{{route_info.name}}"
+ route_host: "{{route_info.host}}"
+ service_name: "{{route_info.name}}"
+ tls_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ with_items:
+ - {name: logging-kibana, host: "{{openshift_logging_kibana_hostname}}"}
+ - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"}
+ loop_control:
+ loop_var: route_info
+ when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops) or route_info.name == 'logging-kibana'
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
new file mode 100644
index 000000000..e20b88c0f
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -0,0 +1,73 @@
+---
+- name: Retrieving the certs and keys to use when generating secrets for the logging components
+ slurp: src="{{generated_certs_dir}}/{{item.file}}"
+ register: key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "kibana_key", file: "system.logging.kibana.key"}
+ - { name: "kibana_cert", file: "system.logging.kibana.crt"}
+ - { name: "curator_key", file: "system.logging.curator.key"}
+ - { name: "curator_cert", file: "system.logging.curator.crt"}
+ - { name: "fluentd_key", file: "system.logging.fluentd.key"}
+ - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}
+ - { name: "kibana_internal_key", file: "kibana-internal.key"}
+ - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
+ - { name: "server_tls", file: "server-tls.json"}
+
+- name: Generating secrets for logging components
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: logging-{{component}}
+ secret_key_file: "{{component}}_key"
+ secret_cert_file: "{{component}}_cert"
+ secrets:
+ - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+ - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+ - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+ secret_keys: ["ca", "cert", "key"]
+ with_items:
+ - kibana
+ - curator
+ - fluentd
+ loop_control:
+ loop_var: component
+ when: secret_name not in openshift_logging_facts[component].secrets or
+ secret_keys | difference(openshift_logging_facts[component].secrets[secret_name]["keys"]) | length != 0
+ check_mode: no
+
+- name: Generating secrets for kibana proxy
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: logging-kibana-proxy
+ secrets:
+ - {key: oauth-secret, value: "{{oauth_secret.stdout}}"}
+ - {key: session-secret, value: "{{session_secret.stdout}}"}
+ - {key: server-key, value: "{{kibana_key_file}}"}
+ - {key: server-cert, value: "{{kibana_cert_file}}"}
+ - {key: server-tls, value: "{{server_tls_file}}"}
+ secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
+ kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
+ kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
+ server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
+ when: secret_name not in openshift_logging_facts.kibana.secrets or
+ secret_keys | difference(openshift_logging_facts.kibana.secrets[secret_name]["keys"]) | length != 0
+ check_mode: no
+
+- name: Generating secrets for elasticsearch
+ command: >
+ {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}}
+ key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks
+ searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks
+ admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt
+ admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
+ vars:
+ secret_name: logging-elasticsearch
+ secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"]
+ register: logging_es_secret
+ when: secret_name not in openshift_logging_facts.elasticsearch.secrets or
+ secret_keys | difference(openshift_logging_facts.elasticsearch.secrets[secret_name]["keys"]) | length != 0
+ check_mode: no
+
+- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml
+ when: logging_es_secret.stdout is defined
+ check_mode: no
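
entry_from_named_pair comes from this role's filter_plugins: given the slurp results registered as key_pairs, it returns the base64 content of the entry whose name matches, and b64decode recovers the original text. In isolation (illustrative debug task):

- debug:
    msg: "{{ key_pairs | entry_from_named_pair('ca_file') | b64decode }}"
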
diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
new file mode 100644
index 000000000..7b956e2e0
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
@@ -0,0 +1,13 @@
+---
+- name: Generating serviceaccounts
+ template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml
+ vars:
+ obj_name: aggregated-logging-{{component}}
+ with_items:
+ - elasticsearch
+ - kibana
+ - fluentd
+ - curator
+ loop_control:
+ loop_var: component
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
new file mode 100644
index 000000000..95f113577
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_services.yaml
@@ -0,0 +1,81 @@
+---
+- name: Generating logging-es service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml
+ vars:
+ obj_name: logging-es
+ ports:
+ - {port: 9200, targetPort: restapi}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es
+ check_mode: no
+
+- name: Generating logging-es-cluster service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml
+ vars:
+ obj_name: logging-es-cluster
+ ports:
+ - {port: 9300}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es
+ check_mode: no
+
+- name: Generating logging-kibana service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml
+ vars:
+ obj_name: logging-kibana
+ ports:
+ - {port: 443, targetPort: oaproxy}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: kibana
+ check_mode: no
+
+- name: Generating logging-es-ops service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml
+ vars:
+ obj_name: logging-es-ops
+ ports:
+ - {port: 9200, targetPort: restapi}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+
+- name: Generating logging-es-ops-cluster service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml
+ vars:
+ obj_name: logging-es-ops-cluster
+ ports:
+ - {port: 9300}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+
+- name: Generating logging-kibana-ops service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml
+ vars:
+ obj_name: logging-kibana-ops
+ ports:
+ - {port: 443, targetPort: oaproxy}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: kibana-ops
+ when: openshift_logging_use_ops
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
new file mode 100644
index 000000000..165a9d14e
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_curator.yaml
@@ -0,0 +1,27 @@
+---
+- name: Generate curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml
+ vars:
+ component: curator
+ logging_component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
+ curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
+ check_mode: no
+
+- name: Generate OPS curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml
+ vars:
+ component: curator-ops
+ logging_component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
+ curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
+ when: openshift_logging_use_ops
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
new file mode 100644
index 000000000..c5d8d3537
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -0,0 +1,105 @@
+---
+- name: Generate PersistentVolumeClaims
+ include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+ vars:
+ es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
+ es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+
+- name: Init pool of DeploymentConfig names for Elasticsearch
+ set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}}
+ vars:
+ component: es
+ es_cluster_name: "{{component}}"
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}}
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+ check_mode: no
+
+
+- name: Generate Elasticsearch DeploymentConfig
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: es
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{openshift_logging_es_memory_limit}}"
+ volume_names: "{{es_pvc_pool | default([])}}"
+ pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ deploy_name: "{{item.1}}"
+ with_indexed_items:
+ - "{{es_dc_pool | default([])}}"
+ check_mode: no
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+
+# --------- Tasks for Operation clusters ---------
+
+- name: Validate Elasticsearch cluster size for Ops
+ fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
+ vars:
+ es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
+ cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{es_dcs | length - openshift_logging_es_ops_cluster_size | abs > 1}}"
+ check_mode: no
+
+- name: Generate PersistentVolumeClaims for Ops
+ include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+ vars:
+ es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
+ es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
+ openshift_logging_es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
+ openshift_logging_es_cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ openshift_logging_es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
+ openshift_logging_es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic}}"
+ openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+
+- name: Init pool of DeploymentConfig names for Elasticsearch for Ops
+ set_fact: es_dc_pool_ops={{es_dc_pool_ops | default([]) + [deploy_name]}}
+ vars:
+ component: es-ops
+ es_cluster_name: "{{component}}"
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ with_sequence: count={{openshift_logging_es_ops_cluster_size - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length}}
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+
+- name: Generate Elasticsearch DeploymentConfig for Ops
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: es-ops
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ volume_names: "{{es_pvc_pool | default([])}}"
+ pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ deploy_name: "{{item.1}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
+ es_node_quorum: "{{es_ops_node_quorum}}"
+ es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
+ es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
+ openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
+ with_indexed_items:
+ - "{{es_dc_pool_ops | default([])}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
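
Scale-up works by comparing the desired cluster size against the DeploymentConfigs recorded in openshift_logging_facts and generating one uniquely named DC per missing node; random_word is another filter from this role's filter_plugins. The naming step in isolation (set_fact and variable name shown for illustration only):

- set_fact:
    example_deploy_name: "logging-es-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
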
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
new file mode 100644
index 000000000..35bd452ed
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_fluentd.yaml
@@ -0,0 +1,38 @@
+---
+- shell: >
+ echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}"
+ register: fluentd_ops_host
+ check_mode: no
+
+- shell: >
+ echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}"
+ register: fluentd_ops_port
+ check_mode: no
+
+
+- name: Generating Fluentd daemonset
+ template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml
+ vars:
+ daemonset_name: logging-fluentd
+ daemonset_component: fluentd
+ daemonset_container_name: fluentd-elasticsearch
+ daemonset_serviceAccount: aggregated-logging-fluentd
+ ops_host: "{{ fluentd_ops_host.stdout }}"
+ ops_port: "{{ fluentd_ops_port.stdout }}"
+ check_mode: no
+
+- name: "Set permissions for fluentd"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: fluentd_output
+ failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ check_mode: no
+
+- name: "Set additional permissions for fluentd"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: fluentd2_output
+ failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
new file mode 100644
index 000000000..382ab2522
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_kibana.yaml
@@ -0,0 +1,33 @@
+---
+- name: Generate kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml
+ vars:
+ component: kibana
+ logging_component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ kibana_cpu_limit: "{{openshift_logging_kibana_cpu_limit }}"
+ kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
+ check_mode: no
+
+- name: Generate OPS kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml
+ vars:
+ component: kibana-ops
+ logging_component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}"
+ kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
+ when: openshift_logging_use_ops
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
new file mode 100644
index 000000000..591f11476
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -0,0 +1,49 @@
+---
+- name: Gather OpenShift Logging Facts
+ openshift_logging_facts:
+ oc_bin: "{{openshift.common.client_binary}}"
+ admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ openshift_logging_namespace: "{{openshift_logging_namespace}}"
+ tags: logging_facts
+ check_mode: no
+
+- name: Validate Elasticsearch cluster size
+ fail: msg="The openshift_logging_es_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
+ when: "{{openshift_logging_facts.elasticsearch.deploymentconfigs | length - openshift_logging_es_cluster_size | abs > 1}}"
+
+- name: Install logging
+ include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml"
+ when: openshift_hosted_logging_install | default(true) | bool
+ with_items:
+ - support
+ - elasticsearch
+ - kibana
+ - curator
+ - fluentd
+ loop_control:
+ loop_var: install_component
+
+- name: Register API objects from generated templates
+ shell: ls -d -1 {{mktemp.stdout}}/templates/* | sort
+ register: logging_objects
+ check_mode: no
+
+- name: Creating API objects from generated templates
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig apply -f {{file}} -n {{openshift_logging_namespace}}
+ with_items: "{{logging_objects.stdout_lines}}"
+ loop_control:
+ loop_var: file
+ when: not ansible_check_mode
+
+- name: Printing out objects to create
+ debug: msg="{{lookup('file', file)|quote}}"
+ with_fileglob:
+ - "{{mktemp.stdout}}/templates/*.yaml"
+ loop_control:
+ loop_var: file
+ when: ansible_check_mode
+
+- name: Scaling up cluster
+ include: start_cluster.yaml
+ when: start_cluster | default(true) | bool
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
new file mode 100644
index 000000000..71979a7d8
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_support.yaml
@@ -0,0 +1,52 @@
+---
+# This is the base configuration for installing the other components
+- name: Check whether the logging project already exists
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
+ register: logging_project_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+
+- name: "Create logging project"
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ when: not ansible_check_mode and "not found" in logging_project_result.stderr
+
+- name: Create logging cert directory
+ file: path={{openshift.common.config_base}}/logging state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_certs.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- name: Create temp directory for all our templates
+ file: path={{mktemp.stdout}}/templates state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_secrets.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- include: generate_configmaps.yaml
+
+- include: generate_services.yaml
+
+- name: Generate kibana-proxy oauth client
+ template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml
+ vars:
+ secret: "{{oauth_secret.stdout}}"
+ when: oauth_secret.stdout is defined
+ check_mode: no
+
+- include: generate_clusterroles.yaml
+
+- include: generate_rolebindings.yaml
+
+- include: generate_clusterrolebindings.yaml
+
+- include: generate_serviceaccounts.yaml
+
+- include: generate_routes.yaml
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
new file mode 100644
index 000000000..55cfea38c
--- /dev/null
+++ b/roles/openshift_logging/tasks/label_node.yaml
@@ -0,0 +1,27 @@
+---
+- shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
+ --template='{{ '{{index .metadata.labels "' }}{{label}}{{ '"}}' }}'
+ register: label_value
+ failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr
+ when: not ansible_check_mode
+
+- shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite
+ register: label_result
+ failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+ when:
+ - value is defined
+ - label_value.stdout is defined
+ - label_value.stdout != value
+ - unlabel is not defined or not unlabel
+ - not ansible_check_mode
+
+- shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}-
+ register: label_result
+ failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+ when:
+ - unlabel is defined
+ - unlabel
+ - not ansible_check_mode
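
These three tasks read a node's current value for the label, set it when a value is supplied and differs, and remove it when unlabel is true; start_cluster.yaml and stop_cluster.yaml drive this file to toggle Fluentd per node. A sketch of one invocation, where the host and label are placeholders for the real node name and the key parsed out of openshift_logging_fluentd_nodeselector:

- include: label_node.yaml
  vars:
    host: node1.example.com
    label: logging-infra-fluentd
    value: "true"
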
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
new file mode 100644
index 000000000..b64c24ade
--- /dev/null
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -0,0 +1,35 @@
+---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+ tags: logging_init
+
+- debug: msg="Created temp dir {{mktemp.stdout}}"
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+ tags: logging_init
+
+- include: "{{ role_path }}/tasks/install_logging.yaml"
+ when: openshift_logging_install_logging | default(false) | bool
+
+- include: "{{ role_path }}/tasks/upgrade_logging.yaml"
+ when: openshift_logging_upgrade_logging | default(false) | bool
+
+- include: "{{ role_path }}/tasks/delete_logging.yaml"
+ when:
+ - not openshift_logging_install_logging | default(false) | bool
+ - not openshift_logging_upgrade_logging | default(false) | bool
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ tags: logging_cleanup
+ changed_when: False
+ check_mode: no
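
main.yaml dispatches on two booleans: install, upgrade, or (when neither is set) delete, always bracketed by temp-dir setup and cleanup. A minimal playbook sketch that drives the install path (the hosts pattern is illustrative):

- hosts: masters[0]
  roles:
    - role: openshift_logging
      openshift_logging_install_logging: true
      # or openshift_logging_upgrade_logging: true for the upgrade path
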
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
new file mode 100644
index 000000000..2c046d6e6
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -0,0 +1,54 @@
+---
+- name: Checking for {{ cert_info.procure_component }}.crt
+ stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.crt"
+ register: component_cert_file
+ check_mode: no
+
+- name: Checking for {{ cert_info.procure_component }}.key
+ stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.key"
+ register: component_key_file
+ check_mode: no
+
+- name: Trying to discover server cert variable name for {{ cert_info.procure_component }}
+ command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}"
+ register: procure_component_crt
+ when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ check_mode: no
+
+- name: Trying to discover the server key variable name for {{ cert_info.procure_component }}
+ command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}"
+ register: procure_component_key
+ when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ check_mode: no
+
+- name: Creating signed server cert and key for {{ cert_info.procure_component }}
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+ --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
+ --signer-serial={{generated_certs_dir}}/ca.serial.txt
+ check_mode: no
+ when:
+ - cert_info.hostnames is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
+
+- name: Copying server key for {{ cert_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_key}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.key
+ check_mode: no
+ when:
+ - cert_info.hostnames is undefined
+ - "{{ cert_info.procure_component }}_crt is defined"
+ - "{{ cert_info.procure_component }}_key is defined"
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
+
+- name: Copying Server cert for {{ cert_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_crt}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+ check_mode: no
+ when:
+ - cert_info.hostnames is undefined
+ - "{{ cert_info.procure_component }}_crt is defined"
+ - "{{ cert_info.procure_component }}_key is defined"
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml
new file mode 100644
index 000000000..3d86ea171
--- /dev/null
+++ b/roles/openshift_logging/tasks/scale.yaml
@@ -0,0 +1,26 @@
+---
+- shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
+ --template='{{ '{{.spec.replicas}}' }}' -n {{openshift_logging_namespace}}
+ register: replica_count
+ failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr
+ when: not ansible_check_mode
+
+- shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
+ --replicas={{desired}} -n {{openshift_logging_namespace}}
+ register: scale_result
+ failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
+ when:
+ - replica_count.stdout != desired
+ - not ansible_check_mode
+
+- shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig describe {{object}} -n {{openshift_logging_namespace}} | awk -v statusrx='Pods Status:' '$0 ~ statusrx {print $3}'
+ register: replica_counts
+ until: replica_counts.stdout.find("{{desired}}") != -1
+ retries: 30
+ delay: 10
+ when:
+ - replica_count.stdout != desired
+ - not ansible_check_mode
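
scale.yaml reads .spec.replicas, rescales only when it differs from the desired count, then polls `oc describe` until the Pods Status line reports that count (up to 30 tries, 10 seconds apart). A sketch of one invocation, with an illustrative object name:

- include: scale.yaml
  vars:
    object: dc/logging-kibana
    desired: 1
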
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
new file mode 100644
index 000000000..cdfc5f2d3
--- /dev/null
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -0,0 +1,107 @@
+---
+- shell: >
+ echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d':' -f1
+ register: openshift_logging_fluentd_nodeselector_key
+ check_mode: no
+
+- shell: >
+ echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d' ' -f2
+ register: openshift_logging_fluentd_nodeselector_value
+ check_mode: no
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name | sed "s,^node/,,g"
+ register: fluentd_hosts
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ check_mode: no
+
+- name: start fluentd
+ include: label_node.yaml
+ vars:
+ host: "{{fluentd_host}}"
+ label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}"
+ value: "{{openshift_logging_fluentd_nodeselector_value.stdout}}"
+ with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ loop_control:
+ loop_var: fluentd_host
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+
+- name: start elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ check_mode: no
+
+- name: start kibana
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ check_mode: no
+
+- name: start curator
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+
+- name: start elasticsearch-ops
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ check_mode: no
+
+- name: start kibana-ops
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ check_mode: no
+
+- name: start curator-ops
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
new file mode 100644
index 000000000..e018d0618
--- /dev/null
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -0,0 +1,98 @@
+---
+- shell: >
+ echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d':' -f1
+ register: openshift_logging_fluentd_nodeselector_key
+
+- shell: >
+ echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d' ' -f2
+ register: openshift_logging_fluentd_nodeselector_value
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name | sed "s,^node/,,g"
+ register: fluentd_hosts
+ when: "'--all' in openshift_logging_fluentd_hosts"
+
+- name: stop fluentd
+ include: label_node.yaml
+ vars:
+ host: "{{fluentd_host}}"
+ label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}"
+ unlabel: True
+ with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ loop_control:
+ loop_var: fluentd_host
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+
+- name: stop elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+
+- name: stop kibana
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+
+- name: stop curator
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+
+- name: stop elasticsearch-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+
+- name: stop kibana-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+
+- name: stop curator-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
new file mode 100644
index 000000000..b2c8022d5
--- /dev/null
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -0,0 +1,33 @@
+---
+- name: Stop the Cluster
+ include: stop_cluster.yaml
+
+- name: Upgrade logging
+ include: install_logging.yaml
+ vars:
+ start_cluster: False
+
+# ensure that ES is running
+- shell: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+
+- name: start elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- copy:
+ src: es_migration.sh
+ dest: "{{mktemp.stdout}}/es_migration.sh"
+
+- name: Run upgrade scripts
+ shell: >
+ sh {{mktemp.stdout}}/es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
+
+- name: Start up rest of cluster
+ include: start_cluster.yaml
diff --git a/roles/openshift_logging/templates/clusterrole.j2 b/roles/openshift_logging/templates/clusterrole.j2
new file mode 100644
index 000000000..0d28db48e
--- /dev/null
+++ b/roles/openshift_logging/templates/clusterrole.j2
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: {{obj_name}}
+rules:
+{% for rule in rules %}
+- resources:
+{% for kind in rule.resources %}
+ - {{ kind }}
+{% endfor %}
+ apiGroups:
+{% if rule.api_groups is defined %}
+{% for group in rule.api_groups %}
+ - {{ group }}
+{% endfor %}
+{% endif %}
+ verbs:
+{% for verb in rule.verbs %}
+ - {{ verb }}
+{% endfor %}
+{% endfor %}
diff --git a/roles/openshift_logging/templates/clusterrolebinding.j2 b/roles/openshift_logging/templates/clusterrolebinding.j2
new file mode 100644
index 000000000..2d25ff1fb
--- /dev/null
+++ b/roles/openshift_logging/templates/clusterrolebinding.j2
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{obj_name}}
+{% if crb_usernames is defined %}
+userNames:
+{% for name in crb_usernames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+{% if crb_groupnames is defined %}
+groupNames:
+{% for name in crb_groupnames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+ namespace: {{sub.namespace}}
+{% endfor %}
+roleRef:
+ name: {{obj_name}}
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
new file mode 100644
index 000000000..3ffb48bfb
--- /dev/null
+++ b/roles/openshift_logging/templates/curator.j2
@@ -0,0 +1,97 @@
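+{#
+Renders the Curator DeploymentConfig. Expects deploy_name, component,
+logging_component, image, curator_cpu_limit and (optionally)
+curator_memory_limit; the openshift_logging_curator_* schedule settings are
+passed through to the container as environment variables.
+#}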
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
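+  # replicas starts at 0; the dc is scaled up later by the start_cluster tasks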
+ replicas: 0
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+      terminationGracePeriodSeconds: 600
+ serviceAccountName: aggregated-logging-curator
+ containers:
+ -
+ name: "curator"
+ image: {{image}}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ cpu: "{{curator_cpu_limit}}"
+{% if curator_memory_limit is defined and curator_memory_limit is not none %}
+ memory: "{{curator_memory_limit}}"
+{% endif %}
+ env:
+ -
+ name: "K8S_HOST_URL"
+ value: "{{master_url}}"
+ -
+ name: "ES_HOST"
+ value: "{{es_host}}"
+ -
+ name: "ES_PORT"
+ value: "{{es_port}}"
+ -
+ name: "ES_CLIENT_CERT"
+ value: "/etc/curator/keys/cert"
+ -
+ name: "ES_CLIENT_KEY"
+ value: "/etc/curator/keys/key"
+ -
+ name: "ES_CA"
+ value: "/etc/curator/keys/ca"
+ -
+ name: "CURATOR_DEFAULT_DAYS"
+ value: "{{openshift_logging_curator_default_days}}"
+ -
+ name: "CURATOR_RUN_HOUR"
+ value: "{{openshift_logging_curator_run_hour}}"
+ -
+ name: "CURATOR_RUN_MINUTE"
+ value: "{{openshift_logging_curator_run_minute}}"
+ -
+ name: "CURATOR_RUN_TIMEZONE"
+ value: "{{openshift_logging_curator_run_timezone}}"
+ -
+ name: "CURATOR_SCRIPT_LOG_LEVEL"
+ value: "{{openshift_logging_curator_script_log_level}}"
+ -
+ name: "CURATOR_LOG_LEVEL"
+ value: "{{openshift_logging_curator_log_level}}"
+ volumeMounts:
+ - name: certs
+ mountPath: /etc/curator/keys
+ readOnly: true
+ - name: config
+ mountPath: /usr/curator/settings
+ readOnly: true
+ - name: elasticsearch-storage
+ mountPath: /elasticsearch/persistent
+ readOnly: true
+ volumes:
+ - name: certs
+ secret:
+ secretName: logging-curator
+ - name: config
+ configMap:
+ name: logging-curator
+ - name: elasticsearch-storage
+ emptyDir: {}
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
new file mode 100644
index 000000000..e5d415f81
--- /dev/null
+++ b/roles/openshift_logging/templates/es.j2
@@ -0,0 +1,105 @@
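+{#
+Renders one Elasticsearch DeploymentConfig per cluster node. Expects
+deploy_name, component, logging_component, image, es_memory_limit and the
+es_node_quorum / es_recover_* values from vars/main.yaml; storage comes from
+pvc_claim when set, otherwise an emptyDir.
+#}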
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: 0
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ spec:
+      terminationGracePeriodSeconds: 600
+ serviceAccountName: aggregated-logging-elasticsearch
+ securityContext:
+ supplementalGroups:
+ - {{openshift_logging_es_storage_group}}
+ containers:
+ -
+ name: "elasticsearch"
+ image: {{image}}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ memory: "{{es_memory_limit}}"
+{% if es_cpu_limit is defined and es_cpu_limit is not none %}
+ cpu: "{{es_cpu_limit}}"
+{% endif %}
+ requests:
+ memory: "512Mi"
+ ports:
+ -
+ containerPort: 9200
+ name: "restapi"
+ -
+ containerPort: 9300
+ name: "cluster"
+ env:
+ -
+ name: "NAMESPACE"
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ -
+ name: "KUBERNETES_TRUST_CERT"
+ value: "true"
+ -
+ name: "SERVICE_DNS"
+ value: "logging-{{es_cluster_name}}-cluster"
+ -
+ name: "CLUSTER_NAME"
+ value: "logging-{{es_cluster_name}}"
+ -
+ name: "INSTANCE_RAM"
+ value: "{{openshift_logging_es_memory_limit}}"
+ -
+ name: "NODE_QUORUM"
+ value: "{{es_node_quorum | int}}"
+ -
+ name: "RECOVER_AFTER_NODES"
+ value: "{{es_recover_after_nodes}}"
+ -
+ name: "RECOVER_EXPECTED_NODES"
+ value: "{{es_recover_expected_nodes}}"
+ -
+ name: "RECOVER_AFTER_TIME"
+ value: "{{openshift_logging_es_recover_after_time}}"
+ volumeMounts:
+ - name: elasticsearch
+ mountPath: /etc/elasticsearch/secret
+ readOnly: true
+ - name: elasticsearch-config
+ mountPath: /usr/share/java/elasticsearch/config
+ readOnly: true
+ - name: elasticsearch-storage
+ mountPath: /elasticsearch/persistent
+ volumes:
+ - name: elasticsearch
+ secret:
+ secretName: logging-elasticsearch
+ - name: elasticsearch-config
+ configMap:
+ name: logging-elasticsearch
+ - name: elasticsearch-storage
+{% if pvc_claim is defined and pvc_claim | trim | length > 0 %}
+ persistentVolumeClaim:
+ claimName: {{pvc_claim}}
+{% else %}
+ emptyDir: {}
+{% endif %}
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
new file mode 100644
index 000000000..a09b582a2
--- /dev/null
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -0,0 +1,149 @@
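+{#
+Renders the Fluentd DaemonSet. Expects daemonset_name, daemonset_component,
+daemonset_container_name and daemonset_serviceAccount; the pod mounts the
+node's journal, /var/log and the docker container directories so the collector
+can read logs directly from the host.
+#}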
+apiVersion: extensions/v1beta1
+kind: "DaemonSet"
+metadata:
+ name: "{{daemonset_name}}"
+ labels:
+ provider: openshift
+ component: "{{daemonset_component}}"
+ logging-infra: "{{daemonset_component}}"
+spec:
+ selector:
+ matchLabels:
+ provider: openshift
+ component: "{{daemonset_component}}"
+ updateStrategy:
+ type: RollingUpdate
+  minReadySeconds: 600
+ template:
+ metadata:
+ name: "{{daemonset_container_name}}"
+ labels:
+ logging-infra: "{{daemonset_component}}"
+ provider: openshift
+ component: "{{daemonset_component}}"
+ spec:
+ serviceAccountName: "{{daemonset_serviceAccount}}"
+ nodeSelector:
+ {{openshift_logging_fluentd_nodeselector}}
+ containers:
+ - name: "{{daemonset_container_name}}"
+ image: "{{openshift_logging_image_prefix}}{{daemonset_name}}:{{openshift_logging_image_version}}"
+ imagePullPolicy: Always
+ securityContext:
+ privileged: true
+ resources:
+ limits:
+ cpu: {{openshift_logging_fluentd_cpu_limit}}
+ memory: {{openshift_logging_fluentd_memory_limit}}
+ volumeMounts:
+ - name: runlogjournal
+ mountPath: /run/log/journal
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: dockercfg
+ mountPath: /etc/sysconfig/docker
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{master_url}}"
+ - name: "ES_HOST"
+ value: "{{openshift_logging_es_host}}"
+ - name: "ES_PORT"
+ value: "{{openshift_logging_es_port}}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{openshift_logging_es_client_cert}}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{openshift_logging_es_client_key}}"
+ - name: "ES_CA"
+ value: "{{openshift_logging_es_ca}}"
+ - name: "OPS_HOST"
+ value: "{{ops_host}}"
+ - name: "OPS_PORT"
+ value: "{{ops_port}}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{openshift_logging_es_ops_client_cert}}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{openshift_logging_es_ops_client_key}}"
+ - name: "OPS_CA"
+ value: "{{openshift_logging_es_ops_ca}}"
+ - name: "ES_COPY"
+ value: "{{openshift_logging_fluentd_es_copy|lower}}"
+ - name: "ES_COPY_HOST"
+ value: "{{es_copy_host | default('')}}"
+ - name: "ES_COPY_PORT"
+ value: "{{es_copy_port | default('')}}"
+ - name: "ES_COPY_SCHEME"
+ value: "{{es_copy_scheme | default('https')}}"
+ - name: "ES_COPY_CLIENT_CERT"
+ value: "{{es_copy_client_cert | default('')}}"
+ - name: "ES_COPY_CLIENT_KEY"
+ value: "{{es_copy_client_key | default('')}}"
+ - name: "ES_COPY_CA"
+ value: "{{es_copy_ca | default('')}}"
+ - name: "ES_COPY_USERNAME"
+ value: "{{es_copy_username | default('')}}"
+ - name: "ES_COPY_PASSWORD"
+ value: "{{es_copy_password | default('')}}"
+ - name: "OPS_COPY_HOST"
+ value: "{{ops_copy_host | default('')}}"
+ - name: "OPS_COPY_PORT"
+ value: "{{ops_copy_port | default('')}}"
+ - name: "OPS_COPY_SCHEME"
+ value: "{{ops_copy_scheme | default('https')}}"
+ - name: "OPS_COPY_CLIENT_CERT"
+ value: "{{ops_copy_client_cert | default('')}}"
+ - name: "OPS_COPY_CLIENT_KEY"
+ value: "{{ops_copy_client_key | default('')}}"
+ - name: "OPS_COPY_CA"
+ value: "{{ops_copy_ca | default('')}}"
+ - name: "OPS_COPY_USERNAME"
+ value: "{{ops_copy_username | default('')}}"
+ - name: "OPS_COPY_PASSWORD"
+ value: "{{ops_copy_password | default('')}}"
+ - name: "USE_JOURNAL"
+ value: "{{openshift_logging_fluentd_use_journal|lower}}"
+ - name: "JOURNAL_SOURCE"
+ value: "{{fluentd_journal_source | default('')}}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ volumes:
+ - name: runlogjournal
+ hostPath:
+ path: /run/log/journal
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: config
+ configMap:
+ name: logging-fluentd
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: dockercfg
+ hostPath:
+ path: /etc/sysconfig/docker
diff --git a/roles/openshift_logging/templates/job.j2 b/roles/openshift_logging/templates/job.j2
new file mode 100644
index 000000000..d7794a407
--- /dev/null
+++ b/roles/openshift_logging/templates/job.j2
@@ -0,0 +1,26 @@
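+{#
+Despite the file name this renders a one-shot Pod (generateName jks-cert-gen-)
+that runs generate-jks.sh from the deployer image as a privileged container,
+writing the JKS keystores into generated_certs_dir mounted from the host.
+#}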
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ logging-infra: support
+ generateName: jks-cert-gen-
+spec:
+ containers:
+ - name: jks-cert-gen
+ image: {{openshift_logging_image_prefix}}logging-deployer:{{openshift_logging_image_version}}
+ imagePullPolicy: Always
+ command: ["sh", "generate-jks.sh"]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /opt/deploy
+ name: certmount
+ env:
+ - name: PROJECT
+ value: {{openshift_logging_namespace}}
+ restartPolicy: Never
+ serviceAccount: aggregated-logging-fluentd
+ volumes:
+ - hostPath:
+ path: "{{generated_certs_dir}}"
+ name: certmount
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
new file mode 100644
index 000000000..ca3d727bf
--- /dev/null
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -0,0 +1,110 @@
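+{#
+Renders the Kibana DeploymentConfig: a kibana container plus the kibana-proxy
+OAuth sidecar, which listens on port 3000 and forwards authenticated requests
+to Kibana on localhost:5601. Expects deploy_name, component, logging_component,
+image, proxy_image, es_host/es_port and master_url/public_master_url.
+#}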
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: 0
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ serviceAccountName: aggregated-logging-kibana
+ containers:
+ -
+ name: "kibana"
+ image: {{image}}
+ imagePullPolicy: Always
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none) or (kibana_cpu_limit is defined and kibana_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if kibana_cpu_limit is defined and kibana_cpu_limit is not none %}
+          cpu: "{{kibana_cpu_limit}}"
+{% endif %}
+{% if kibana_memory_limit is defined and kibana_memory_limit is not none %}
+          memory: "{{kibana_memory_limit}}"
+{% endif %}
+{% endif %}
+ env:
+ - name: "ES_HOST"
+ value: "{{es_host}}"
+ - name: "ES_PORT"
+ value: "{{es_port}}"
+ volumeMounts:
+ - name: kibana
+ mountPath: /etc/kibana/keys
+ readOnly: true
+ -
+ name: "kibana-proxy"
+ image: {{proxy_image}}
+ imagePullPolicy: Always
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none) or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none %}
+          cpu: "{{kibana_proxy_cpu_limit}}"
+{% endif %}
+{% if kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none %}
+          memory: "{{kibana_proxy_memory_limit}}"
+{% endif %}
+{% endif %}
+ ports:
+ -
+ name: "oaproxy"
+ containerPort: 3000
+ env:
+ -
+ name: "OAP_BACKEND_URL"
+ value: "http://localhost:5601"
+ -
+ name: "OAP_AUTH_MODE"
+ value: "oauth2"
+ -
+ name: "OAP_TRANSFORM"
+ value: "user_header,token_header"
+ -
+ name: "OAP_OAUTH_ID"
+ value: kibana-proxy
+ -
+ name: "OAP_MASTER_URL"
+            value: "{{master_url}}"
+          -
+            name: "OAP_PUBLIC_MASTER_URL"
+            value: "{{public_master_url}}"
+          -
+            name: "OAP_LOGOUT_REDIRECT"
+            value: "{{public_master_url}}/console/logout"
+ -
+ name: "OAP_MASTER_CA_FILE"
+ value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ -
+ name: "OAP_DEBUG"
+ value: "{{openshift_logging_kibana_proxy_debug}}"
+ volumeMounts:
+ - name: kibana-proxy
+ mountPath: /secret
+ readOnly: true
+ volumes:
+ - name: kibana
+ secret:
+ secretName: logging-kibana
+ - name: kibana-proxy
+ secret:
+ secretName: logging-kibana-proxy
diff --git a/roles/openshift_logging/templates/oauth-client.j2 b/roles/openshift_logging/templates/oauth-client.j2
new file mode 100644
index 000000000..41d3123cb
--- /dev/null
+++ b/roles/openshift_logging/templates/oauth-client.j2
@@ -0,0 +1,15 @@
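+{#
+Registers the kibana-proxy OAuth client. scopeRestrictions limit tokens issued
+through this client to user info, access checks and project listing.
+#}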
+apiVersion: v1
+kind: OAuthClient
+metadata:
+ name: kibana-proxy
+ labels:
+ logging-infra: support
+secret: "{{secret}}"
+redirectURIs:
+- https://{{openshift_logging_kibana_hostname}}
+- https://{{openshift_logging_kibana_ops_hostname}}
+scopeRestrictions:
+- literals:
+ - user:info
+ - user:check-access
+ - user:list-projects
diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_logging/templates/pvc.j2
new file mode 100644
index 000000000..f19a3a750
--- /dev/null
+++ b/roles/openshift_logging/templates/pvc.j2
@@ -0,0 +1,27 @@
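+{#
+Expects obj_name, size and access_modes; annotations and pv_selector are
+optional. A minimal sketch of the expected input (values illustrative):
+  obj_name: logging-es-0
+  size: 10Gi
+  access_modes: [ReadWriteOnce]
+#}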
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{obj_name}}
+ labels:
+ logging-infra: support
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+ accessModes:
+{% for mode in access_modes %}
+ - {{ mode }}
+{% endfor %}
+ resources:
+ requests:
+ storage: {{size}}
diff --git a/roles/openshift_logging/templates/rolebinding.j2 b/roles/openshift_logging/templates/rolebinding.j2
new file mode 100644
index 000000000..fcd4e87cc
--- /dev/null
+++ b/roles/openshift_logging/templates/rolebinding.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: RoleBinding
+metadata:
+ name: {{obj_name}}
+roleRef:
+{% if roleRef.kind is defined %}
+ kind: {{ roleRef.kind }}
+{% endif %}
+ name: {{ roleRef.name }}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+{% endfor %}
diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging/templates/route_reencrypt.j2
new file mode 100644
index 000000000..8be30a2c4
--- /dev/null
+++ b/roles/openshift_logging/templates/route_reencrypt.j2
@@ -0,0 +1,25 @@
+apiVersion: "v1"
+kind: "Route"
+metadata:
+ name: "{{obj_name}}"
+{% if labels is defined %}
+  labels:
+{% for key, value in labels.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ host: {{ route_host }}
+ tls:
+ caCertificate: |
+{% for line in tls_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ destinationCACertificate: |
+{% for line in tls_dest_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ termination: reencrypt
+ to:
+ kind: Service
+ name: {{ service_name }}
diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2
new file mode 100644
index 000000000..d73bae9c4
--- /dev/null
+++ b/roles/openshift_logging/templates/secret.j2
@@ -0,0 +1,9 @@
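+{#
+Expects secret_name and secrets, a list of {key, value} pairs; values are
+base64-encoded here, so callers pass them in plain text.
+#}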
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{secret_name}}
+type: Opaque
+data:
+{% for s in secrets %}
+ {{s.key}}: {{s.value | b64encode}}
+{% endfor %}
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
new file mode 100644
index 000000000..6c4ec0c76
--- /dev/null
+++ b/roles/openshift_logging/templates/service.j2
@@ -0,0 +1,28 @@
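+{#
+Expects obj_name, ports and selector; labels and service_targetPort are
+optional. The service is rendered headless (clusterIP: None) when a port
+entry omits targetPort.
+#}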
+apiVersion: "v1"
+kind: "Service"
+metadata:
+ name: "{{obj_name}}"
+{% if labels is defined %}
+  labels:
+{% for key, value in labels.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ ports:
+{% for port in ports %}
+ -
+{% for key, value in port.items() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endfor %}
+{% if ports | selectattr('targetPort', 'undefined') | list | count > 0 %}
+  clusterIP: "None"
+{% endif %}
+{% if service_targetPort is defined %}
+ targetPort: {{service_targetPort}}
+{% endif %}
+  selector:
+{% for key, value in selector.items() %}
+    {{key}}: {{value}}
+{% endfor %}
diff --git a/roles/openshift_logging/templates/serviceaccount.j2 b/roles/openshift_logging/templates/serviceaccount.j2
new file mode 100644
index 000000000..b22acc594
--- /dev/null
+++ b/roles/openshift_logging/templates/serviceaccount.j2
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{obj_name}}
+{% if labels is defined %}
+  labels:
+{% for key, value in labels.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if secrets is defined %}
+secrets:
+{% for name in secrets %}
+- name: {{ name }}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
new file mode 100644
index 000000000..fb8af11e9
--- /dev/null
+++ b/roles/openshift_logging/vars/main.yaml
@@ -0,0 +1,40 @@
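+# Legacy deployer parameters: each *_cmap_param renders to '' when its
+# openshift_hosted_logging_* input is undefined; deployer_cmap_params
+# concatenates them into the --from-literal argument string for the deployer
+# configmap.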
+tr_or_ohlip: "{{ openshift_hosted_logging_openshift_logging_image_prefix or target_registry or none }}"
+ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip else '' }}"
+iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_openshift_logging_image_version | quote if openshift_hosted_logging_openshift_logging_image_version is defined else '' }}"
+oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}"
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname is defined else '' }}"
+kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname is defined else '' }}"
+pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url is defined else '' }}"
+es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size is defined else '' }}"
+es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size is defined else '' }}"
+es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram is defined else '' }}"
+es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram is defined else '' }}"
+es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size is defined else '' }}"
+es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size is defined else '' }}"
+es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix is defined else '' }}"
+es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix is defined else '' }}"
+es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic is defined else '' }}"
+es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic is defined else '' }}"
+es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group is defined else '' }}"
+es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector is defined else '' }}"
+es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector is defined else '' }}"
+fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_openshift_logging_fluentd_nodeselector | quote if openshift_hosted_logging_openshift_logging_fluentd_nodeselector is defined else 'logging-infra-fluentd=true' }}"
+kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector is defined else '' }}"
+kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector is defined else '' }}"
+cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector is defined else '' }}"
+cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector is defined else '' }}"
+ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster is defined else '' }}"
+use_journal_cmap_param: "{{ '--from-literal use-journal=' ~ openshift_hosted_logging_use_journal | string | lower | quote if openshift_hosted_logging_use_journal is defined else '' }}"
+journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source is defined else '' }}"
+openshift_logging_fluentd_journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_openshift_logging_fluentd_journal_read_from_head | string | lower | quote if openshift_hosted_logging_openshift_logging_fluentd_journal_read_from_head is defined else '' }}"
+ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret is defined else '' }}"
+deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ use_journal_cmap_param }} {{ journal_source_cmap_param }} {{ openshift_logging_fluentd_journal_read_from_head_cmap_param }} {{ ips_cmap_param }}"
+
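+# Quorum and recovery thresholds derived from the cluster size; quorum is a
+# strict majority, i.e. floor(size / 2) + 1.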
+es_node_quorum: "{{ (openshift_logging_es_cluster_size | int) // 2 + 1 }}"
+es_recover_after_nodes: "{{ (openshift_logging_es_cluster_size | int) - 1 }}"
+es_recover_expected_nodes: "{{ openshift_logging_es_cluster_size | int }}"
+
+es_ops_node_quorum: "{{ (openshift_logging_es_ops_cluster_size | int) // 2 + 1 }}"
+es_ops_recover_after_nodes: "{{ (openshift_logging_es_ops_cluster_size | int) - 1 }}"
+es_ops_recover_expected_nodes: "{{ openshift_logging_es_ops_cluster_size | int }}"