---
# TODO: allow for overriding default ports where possible
- fail:
    msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
  when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']

- name: Set node facts
  openshift_facts:
    role: "{{ item.role }}"
    local_facts: "{{ item.local_facts }}"
  with_items:
  # Reset node labels to an empty dictionary.
  - role: node
    local_facts:
      labels: {}
  - role: node
    local_facts:
      annotations: "{{ openshift_node_annotations | default(None) }}"
      debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
      iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
      kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
      labels: "{{ lookup('oo_option', 'openshift_node_labels') | default(openshift_node_labels | default(None), true) }}"
      registry_url: "{{ oreg_url | default(None) }}"
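      # The fallback to the misspelled 'openshift_scheduleable' below appears
      # intentional, preserving compatibility with inventories that used the
      # old variable name.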
      schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
      sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
      storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
      set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
      node_image: "{{ osn_image | default(None) }}"
      ovs_image: "{{ osn_ovs_image | default(None) }}"
      proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
      local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
      dns_ip: "{{ openshift_dns_ip | default(None) | get_dns_ip(hostvars[inventory_hostname]) }}"
      env_vars: "{{ openshift_node_env_vars | default(None) }}"

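# For reference, the local_facts above are typically driven by inventory
# variables of the same name; for example (values are illustrative only):
#   openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
#   openshift_node_kubelet_args="{'max-pods': ['40']}"
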
# We have to install tuned-profiles in the same transaction; otherwise we run
# into depsolving problems because the rpms don't pin the version properly.
# This was fixed in 3.1 packaging.
- name: Install Node package
  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
  when: not openshift.common.is_containerized | bool
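# For example, with service_type 'atomic-openshift' and openshift_pkg_version
# '-3.2.0.46' (illustrative values), the action above installs
# atomic-openshift-node-3.2.0.46 and tuned-profiles-atomic-openshift-node-3.2.0.46
# in a single package transaction.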

- name: Set atomic-guest tuned profile
  command: "tuned-adm profile atomic-guest"
  when: openshift.common.is_atomic | bool

- name: Install sdn-ovs package
  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
  when: openshift.common.use_openshift_sdn | bool and not openshift.common.is_containerized | bool

- name: Pull node image
  command: >
    docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
  register: pull_result
  changed_when: "'Downloaded newer image' in pull_result.stdout"
  when: openshift.common.is_containerized | bool

- name: Pull Open vSwitch image
  command: >
    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
  register: pull_result
  changed_when: "'Downloaded newer image' in pull_result.stdout"
  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
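# With the default image names these pulls resolve to something like
#   docker pull openshift3/node:v3.2.0.46
#   docker pull openshift3/openvswitch:v3.2.0.46
# (image names and tag are illustrative; the actual values come from the
# node_image/ovs_image facts and openshift_image_tag).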

- name: Install the systemd units
  include: systemd_units.yml

# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
- name: Persist net.ipv4.ip_forward sysctl entry
  sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes
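# With sysctl_set, state=present and reload, the sysctl module both applies
# the value immediately and persists it to /etc/sysctl.conf, so the setting
# survives reboots and network service restarts.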

- name: Start and enable openvswitch docker service
  service: name=openvswitch.service enabled=yes state=started
  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
  register: ovs_start_result

- set_fact:
    ovs_service_status_changed: "{{ ovs_start_result | changed }}"

- name: Ensure the kubelet config directory exists
  file:
    dest: "{{ (openshift_node_kubelet_args | default({'config': None})).config }}"
    state: directory
  when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args

# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
  template:
    dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
    src: node.yaml.v1.j2
    backup: true
    owner: root
    group: root
    mode: 0600
  notify:
  - restart node

- name: Configure AWS Cloud Provider Settings
  lineinfile:
    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
    regexp: "{{ item.regex }}"
    line: "{{ item.line }}"
    create: true
  with_items:
    - regex: '^AWS_ACCESS_KEY_ID='
      line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
    - regex: '^AWS_SECRET_ACCESS_KEY='
      line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
  no_log: True
  when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
  notify:
  - restart node
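# The resulting /etc/sysconfig/{{ openshift.common.service_type }}-node would
# contain lines of the form (placeholder values from the AWS documentation):
#   AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
#   AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY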

- name: Configure Node Environment Variables
  lineinfile:
    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
    regexp: "^{{ item.key }}="
    line: "{{ item.key }}={{ item.value }}"
    create: true
  with_dict: "{{ openshift.node.env_vars | default({}) }}"
  notify:
  - restart node
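# For example, openshift_node_env_vars={'HTTP_PROXY': 'http://proxy.example.com:3128'}
# in the inventory renders HTTP_PROXY=http://proxy.example.com:3128 into the
# node sysconfig file (variable and value are illustrative).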

- name: NFS storage plugin configuration
  include: storage_plugins/nfs.yml
  tags:
    - nfs

- name: GlusterFS storage plugin configuration
  include: storage_plugins/glusterfs.yml
  when: "'glusterfs' in openshift.node.storage_plugin_deps"

- name: Ceph storage plugin configuration
  include: storage_plugins/ceph.yml
  when: "'ceph' in openshift.node.storage_plugin_deps"

- name: iSCSI storage plugin configuration
  include: storage_plugins/iscsi.yml
  when: "'iscsi' in openshift.node.storage_plugin_deps"

# This wait is necessary because, when the host is both a node and a master,
# the master is restarted after the node restarts docker, and it can take up
# to 60 seconds for systemd to start the master again.
- name: Wait for master API to become available before proceeding
  # Using curl here since the uri module requires python-httplib2 and
  # wait_for port doesn't provide health information.
  command: >
    curl --silent --cacert {{ openshift.common.config_base }}/node/ca.crt
    {{ openshift_node_master_api_url }}/healthz/ready
  args:
    # Disables the following warning:
    # Consider using get_url or uri module rather than running curl
    warn: no
  register: api_available_output
  until: api_available_output.stdout == 'ok'
  retries: 120
  delay: 1
  changed_when: false
  when: openshift.common.is_containerized | bool
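# The /healthz/ready endpoint returns the plain-text body 'ok' once the master
# API is ready to serve requests, which is what the until condition checks.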

- name: Start and enable node dep
  service: name={{ openshift.common.service_type }}-node-dep enabled=yes state=started
  when: openshift.common.is_containerized | bool

- name: Start and enable node
  service: name={{ openshift.common.service_type }}-node enabled=yes state=started
  register: node_start_result
  until: not node_start_result | failed
  retries: 1
  delay: 30

- set_fact:
    node_service_status_changed: "{{ node_start_result | changed }}"