summary refs log tree commit diff stats
path: root/roles/cuda/tasks/main.yml
diff options
context:
space:
mode:
Diffstat (limited to 'roles/cuda/tasks/main.yml')
-rw-r--r--  roles/cuda/tasks/main.yml  54
1 file changed, 54 insertions, 0 deletions
diff --git a/roles/cuda/tasks/main.yml b/roles/cuda/tasks/main.yml
new file mode 100644
index 0000000..f292f67
--- /dev/null
+++ b/roles/cuda/tasks/main.yml
@@ -0,0 +1,64 @@
+---
+# tasks file for ansible-role-cuda
+
+# Load the most specific variable file available, falling back to
+# progressively more generic ones: distro+full version -> distro+major
+# version -> distro -> OS family.
+- name: "Gather OS specific variables"
+  include_vars: "{{ item }}"
+  with_first_found:
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version }}.yml"
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version }}.yml"
+    - "{{ ansible_distribution | lower }}.yml"
+    - "{{ ansible_os_family | lower }}.yml"
+
+# Everything below runs only on GPU nodes (see the `when` on the block).
+- block:
+    - include_tasks: configure_yum.yml
+      when: ansible_pkg_mgr in ['yum', 'dnf']
+
+    - include_tasks: configure_apt.yml
+      when: ansible_pkg_mgr == 'apt'
+
+    - name: Install kernel development files
+      package:
+        name: kernel-devel
+        state: present
+      register: result
+
+    # If kernel-devel changed, the running kernel may be older than the
+    # headers just installed; bring the kernel package up to the latest
+    # version so the two stay in sync for the NVIDIA module build.
+    # NOTE: `result | changed` (the deprecated filter) was removed in
+    # Ansible 2.9 — `is changed` is the supported test.
+    - name: Synchronize kernel and kernel-devel packages
+      package:
+        name: kernel
+        state: latest
+      when: result is changed
+
+    - name: Install CUDA and related packages (1.5-2GB download, also restarts if cuda_restart_node_on_install is set to True)
+      package:
+        name: "{{ item }}"
+        state: present
+      loop: "{{ cuda_packages }}"
+      register: cuda_packages_installation
+      notify:
+        - ZZ CUDA Restart server
+        - ZZ CUDA Wait for server to restart
+
+    # Export CUDA_HOME/PATH/LD_LIBRARY_PATH to all login shells.
+    # Mode is quoted: an unquoted 0755 is parsed as the integer 493.
+    - name: Template CUDA paths to user environments
+      template:
+        src: cuda.sh.j2
+        dest: /etc/profile.d/cuda.sh
+        mode: "0755"
+      when: cuda_bash_profile | bool
+
+    - include_tasks: cuda_init.yml
+      when: cuda_init | bool
+
+    # This is here because if we in the same playbook try to start slurmd without
+    # having run the cuda_init.sh script then slurmd doesn't start and the play fails.
+    # todo: reload nvidia modules/etc instead of restart
+    - name: flush the handlers - so that the node is rebooted after CUDA is installed and that the GPUs are initialized before we start slurm
+      meta: flush_handlers
+
+  when: gpu | bool
+
+# vim:ft=ansible: