---
- name: Getting current ES deployment size
  set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}

- set_fact: openshift_logging_es_pvc_prefix="logging-es"
  when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''

### evaluate if the PVC attached to the dc currently matches the provided vars
## if it does then we reuse that pvc in the DC
- include: set_es_storage.yaml
  vars:
    es_component: es
    es_name: "{{ deployment.0 }}"
    es_spec: "{{ deployment.1 }}"
    es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
    es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
    es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
    es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
    es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
    es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
    es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
    es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
    es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
    es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
  with_together:
  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
  loop_control:
    loop_var: deployment

## if it does not then we should create one that does and attach it
## create new dc/pvc as needed
- include: set_es_storage.yaml
  vars:
    es_component: es
    es_name: "logging-es-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
    es_spec: "{}"
    es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
    es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
    es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
    es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
    es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
    es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
    es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
    es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
    es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
    es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
  with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }}

# --------- Tasks for Operations clusters ---------
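## These tasks mirror the storage evaluation above for the optional
## Operations Elasticsearch cluster, using the elasticsearch_ops facts and
## the openshift_logging_es_ops_* variables; the set_es_storage.yaml
## includes are gated on openshift_logging_use_ops | bool.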
- name: Getting current ES ops deployment size
  set_fact: openshift_logging_current_es_ops_size={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length }}

- set_fact: openshift_logging_es_ops_pvc_prefix="{{ openshift_logging_es_ops_pvc_prefix | default('logging-es-ops') }}"

- name: Validate Elasticsearch cluster size for Ops
  fail: msg="openshift_logging_es_ops_cluster_size may not be scaled down by more than 1 node at a time from the number of Elasticsearch nodes already deployed"
  vars:
    es_dcs: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}"
    cluster_size: "{{ openshift_logging_es_ops_cluster_size | int }}"
  when:
  - openshift_logging_use_ops | bool
  - es_dcs | length - cluster_size | int > 1
  check_mode: no

- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
  when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''

- include: set_es_storage.yaml
  vars:
    es_component: es-ops
    es_name: "{{ deployment.0 }}"
    es_spec: "{{ deployment.1 }}"
    es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
    es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
    es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
    es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
    es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
    es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
    es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
    es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
    es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
    es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
  with_together:
  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
  loop_control:
    loop_var: deployment
  when:
  - openshift_logging_use_ops | bool

## if it does not then we should create one that does and attach it
## create new dc/pvc as needed
- include: set_es_storage.yaml
  vars:
    es_component: es-ops
    es_name: "logging-es-ops-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
    es_spec: "{}"
    es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
    es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
    es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
    es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
    es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
    es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
    es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
    es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
    es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
    es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
  with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}
  when:
  - openshift_logging_use_ops | bool