diff --git a/ansible/roles/persist_hostkeys/tasks/main.yml b/ansible/roles/persist_hostkeys/tasks/main.yml
index 47493220d..e2d97d6b5 100644
--- a/ansible/roles/persist_hostkeys/tasks/main.yml
+++ b/ansible/roles/persist_hostkeys/tasks/main.yml
@@ -2,7 +2,7 @@
 
 - name: Ensure hostkeys directory exists on persistent storage
   file:
-    path: "{{ appliances_state_dir }}/hostkeys/{{ inventory_hostname }}"
+    path: "/mnt/hostkeys/{{ inventory_hostname }}"
     state: directory
     owner: root
     group: root
@@ -11,7 +11,7 @@
 - name: Copy hostkeys from persistent storage
   # won't fail if no keys are in persistent storage
   copy:
-    src: "{{ appliances_state_dir }}/hostkeys/{{ inventory_hostname }}/"
+    src: "/mnt/hostkeys/{{ inventory_hostname }}/"
     dest: /etc/ssh/
     remote_src: true
 
@@ -23,7 +23,7 @@
 
 - name: Persist hostkeys
   copy:
-    dest: "{{ appliances_state_dir }}/hostkeys/{{ inventory_hostname }}/"
+    dest: "/mnt/hostkeys/{{ inventory_hostname }}/"
     src: "{{ item }}"
     remote_src: true
     mode: preserve
diff --git a/environments/.caas/inventory/extra_groups b/environments/.caas/inventory/extra_groups
index d60ae7839..9feead562 100644
--- a/environments/.caas/inventory/extra_groups
+++ b/environments/.caas/inventory/extra_groups
@@ -15,5 +15,3 @@ compute
 [podman:children]
 zenith
 
-[persist_hostkeys:children]
-openondemand
diff --git a/environments/.caas/inventory/group_vars/all/nfs.yml b/environments/.caas/inventory/group_vars/all/nfs.yml
index 14fff6295..c19b32644 100644
--- a/environments/.caas/inventory/group_vars/all/nfs.yml
+++ b/environments/.caas/inventory/group_vars/all/nfs.yml
@@ -1,20 +1 @@
-nfs_server: "{{ nfs_server_default }}"
-
-caas_nfs_ood_state:
-  - comment: Export /var/lib/state from Slurm control node to OOD
-    nfs_enable:
-      server: "{{ inventory_hostname in groups['control'] }}"
-      clients: "{{ inventory_hostname in groups['openondemand'] }}"
-    nfs_export: "{{ appliances_state_dir }}"
-    nfs_client_mnt_point: "{{ appliances_state_dir }}"
-    nfs_client_mnt_options: "x-systemd.required-by=zenith-ood.service,x-systemd.before=zenith-ood.service"
-
-caas_nfs_home:
-  - comment: Export /exports/home from Slurm control node as /home
-    nfs_enable:
-      server: "{{ inventory_hostname in groups['control'] }}"
-      clients: "{{ inventory_hostname in groups['cluster'] }}"
-    nfs_export: "/exports/home" # assumes skeleton TF is being used
-    nfs_client_mnt_point: "/home"
-
-nfs_configurations: "{{ caas_nfs_ood_state + (caas_nfs_home if not cluster_home_manila_share | bool else []) }}"
+nfs_configurations: "{{ nfs_default_configs + (nfs_home_config if not cluster_home_manila_share | bool else []) }}"
diff --git a/environments/common/inventory/group_vars/all/nfs.yml b/environments/common/inventory/group_vars/all/nfs.yml
index 45b7c6967..21063d2e1 100644
--- a/environments/common/inventory/group_vars/all/nfs.yml
+++ b/environments/common/inventory/group_vars/all/nfs.yml
@@ -5,19 +5,32 @@
 
 nfs_server_default: "{{ groups['control'] | first }}" # avoid using hostvars for compute-init
 
-nfs_configurations:
-  - comment: Export /exports/home from Slurm control node as /home
-    nfs_enable:
-      server: "{{ inventory_hostname in groups['control'] }}"
-      # Don't mount share on server where it is exported from...
-      # Could do something like `nfs_clients: "{{ 'nfs_servers' not in group_names }}"` instead. 
-      clients: "{{ inventory_hostname in groups['cluster'] and inventory_hostname not in groups['control'] }}"
-    nfs_server: "{{ nfs_server_default }}"
-    nfs_export: "/exports/home" # assumes skeleton TF is being used
-    nfs_client_mnt_point: "/home"
-
+nfs_default_configs:
   - comment: Export /exports/cluster from Slurm control node
     nfs_enable:
       server: "{{ inventory_hostname in groups['control'] }}"
       clients: false
     nfs_export: "/exports/cluster"
+
+  - comment: Export hostkeys from Slurm control node to OOD
+    nfs_enable:
+      server: "{{ inventory_hostname in groups['control'] }}"
+      clients: "{{ inventory_hostname in groups['openondemand'] or inventory_hostname in groups['login'] }}"
+    nfs_export: "{{ appliances_state_dir | default('/var/lib/state') }}/hostkeys" # needs to be defaulted to run but should be skipped on clients where appliances_state_dir is otherwise undefined
+    nfs_server: "{{ nfs_server_default }}"
+    nfs_client_mnt_point: "/mnt/hostkeys"
+    nfs_client_mnt_options: "x-systemd.required-by=zenith-ood.service,x-systemd.before=zenith-ood.service"
+
+# Separated to be overridable in caas
+nfs_home_config:
+  - comment: Export /exports/home from Slurm control node as /home
+    nfs_enable:
+      server: "{{ inventory_hostname in groups['control'] }}"
+      # Don't mount share on server where it is exported from...
+      # Could do something like `nfs_clients: "{{ 'nfs_servers' not in group_names }}"` instead.
+      clients: "{{ inventory_hostname in groups['cluster'] and inventory_hostname not in groups['control'] }}"
+    nfs_server: "{{ nfs_server_default }}"
+    nfs_export: "/exports/home" # assumes skeleton TF is being used
+    nfs_client_mnt_point: "/home"
+
+nfs_configurations: "{{ nfs_default_configs + nfs_home_config }}"
diff --git a/environments/common/layouts/everything b/environments/common/layouts/everything
index 878bebbf3..5c15965a5 100644
--- a/environments/common/layouts/everything
+++ b/environments/common/layouts/everything
@@ -69,8 +69,10 @@ openhpc
 [manila]
 # Hosts to configure for manila fileshares
 
-[persist_hostkeys]
+[persist_hostkeys:children]
 # Hosts to persist hostkeys for across reimaging. NB: Requires appliances_state_dir on hosts.
+login
+openondemand
 
 [squid]
 # Hosts to run squid proxy
diff --git a/environments/skeleton/{{cookiecutter.environment}}/terraform/inventory.tpl b/environments/skeleton/{{cookiecutter.environment}}/terraform/inventory.tpl
index 22642e9a5..ccda5946e 100644
--- a/environments/skeleton/{{cookiecutter.environment}}/terraform/inventory.tpl
+++ b/environments/skeleton/{{cookiecutter.environment}}/terraform/inventory.tpl
@@ -20,6 +20,12 @@ login:
       ansible_host: ${[for n in login.network: n.fixed_ip_v4 if n.access_network][0]}
       instance_id: ${ login.id }
 %{ endfor ~}
+  vars:
+    appliances_state_dir: ${state_dir}
+
+openondemand:
+  vars:
+    appliances_state_dir: ${state_dir}
 
 %{ for group_name in keys(compute_groups) ~}
 ${cluster_name}_${group_name}: