@@ -1,33 +1,39 @@
 require 'spec_helper'
 
 describe 'aws-parallelcluster-environment::login_nodes_keys' do
+  SHARED_DIR_LOGIN_NODES = "/SHARED_DIR_LOGIN_NODES".freeze
+  SYNC_FILE = "#{SHARED_DIR_LOGIN_NODES}/.login_nodes_keys_sync_file".freeze
+  CLUSTER_CONFIG_VERSION = "CLUSTER_CONFIG_VERSION".freeze
+
   for_all_oses do |platform, version|
     context "on #{platform}#{version}" do
       context "when awsbatch scheduler" do
         cached(:chef_run) do
           runner = runner(platform: platform, version: version) do |node|
             node.override['cluster']['scheduler'] = 'awsbatch'
+            node.override['cluster']['shared_dir_login_nodes'] = SHARED_DIR_LOGIN_NODES
           end
           runner.converge(described_recipe)
         end
         cached(:node) { chef_run.node }
 
         it 'does not create the script directory' do
-          is_expected.to_not create_directory("#{node['cluster']['shared_dir_login_nodes']}/scripts")
+          is_expected.to_not create_directory("#{SHARED_DIR_LOGIN_NODES}/scripts")
         end
       end
 
       context "when compute node" do
         cached(:chef_run) do
           runner = runner(platform: platform, version: version) do |node|
             node.override['cluster']['node_type'] = 'ComputeFleet'
+            node.override['cluster']['shared_dir_login_nodes'] = SHARED_DIR_LOGIN_NODES
           end
           runner.converge(described_recipe)
         end
         cached(:node) { chef_run.node }
 
         it 'does not create the scripts directory' do
-          is_expected.to_not create_directory("#{node['cluster']['shared_dir_login_nodes']}/scripts")
+          is_expected.to_not create_directory("#{SHARED_DIR_LOGIN_NODES}/scripts")
         end
       end
 
@@ -36,22 +42,23 @@
           runner = runner(platform: platform, version: version) do |node|
             node.override['cluster']['node_type'] = 'HeadNode'
             node.override['cluster']['scheduler'] = 'slurm'
-            node.override['cluster']['shared_dir_login_nodes'] = '/opt/parallelcluster/shared_login_nodes'
+            node.override['cluster']['shared_dir_login_nodes'] = SHARED_DIR_LOGIN_NODES
+            node.override['cluster']['cluster_config_version'] = CLUSTER_CONFIG_VERSION
           end
           runner.converge(described_recipe)
         end
         cached(:node) { chef_run.node }
 
         it 'creates the scripts directory' do
-          is_expected.to create_directory("#{node['cluster']['shared_dir_login_nodes']}/scripts").with(
+          is_expected.to create_directory("#{SHARED_DIR_LOGIN_NODES}/scripts").with(
             owner: 'root',
             group: 'root',
             mode: '0744'
           )
         end
 
         it 'creates keys-manager.sh script' do
-          is_expected.to create_cookbook_file("#{node['cluster']['shared_dir_login_nodes']}/scripts/keys-manager.sh").with(
+          is_expected.to create_cookbook_file("#{SHARED_DIR_LOGIN_NODES}/scripts/keys-manager.sh").with(
             source: 'login_nodes/keys-manager.sh',
             owner: "root",
             group: "root",
@@ -61,7 +68,16 @@
 
         it "creates login nodes keys" do
           is_expected.to run_execute("Initialize Login Nodes keys")
-            .with(command: "bash #{node['cluster']['shared_dir_login_nodes']}/scripts/keys-manager.sh --create --folder-path #{node['cluster']['shared_dir_login_nodes']}")
+            .with(command: "bash #{SHARED_DIR_LOGIN_NODES}/scripts/keys-manager.sh --create --folder-path #{SHARED_DIR_LOGIN_NODES}")
+        end
+
+        it "writes the synchronization file for login nodes" do
+          is_expected.to create_file(SYNC_FILE).with(
+            content: CLUSTER_CONFIG_VERSION,
+            mode: '0644',
+            owner: 'root',
+            group: 'root'
+          )
         end
       end
 
@@ -70,15 +86,25 @@
           runner = runner(platform: platform, version: version) do |node|
             node.override['cluster']['node_type'] = 'LoginNode'
             node.override['cluster']['scheduler'] = 'slurm'
-            node.override['cluster']['shared_dir_login_nodes'] = '/opt/parallelcluster/shared_login_nodes'
+            node.override['cluster']['shared_dir_login_nodes'] = SHARED_DIR_LOGIN_NODES
+            node.override['cluster']['cluster_config_version'] = CLUSTER_CONFIG_VERSION
           end
           runner.converge(described_recipe)
         end
         cached(:node) { chef_run.node }
 
+        it "waits for cluster config version file" do
+          is_expected.to run_bash("Wait for synchronization file at #{SYNC_FILE} to be written for version #{CLUSTER_CONFIG_VERSION}").with(
+            code: "[[ \"$(cat #{SYNC_FILE})\" == \"#{CLUSTER_CONFIG_VERSION}\" ]] || exit 1",
+            retries: 30,
+            retry_delay: 10,
+            timeout: 5
+          )
+        end
+
         it "imports login nodes keys" do
           is_expected.to run_execute("Import Login Nodes keys")
-            .with(command: "bash #{node['cluster']['shared_dir_login_nodes']}/scripts/keys-manager.sh --import --folder-path #{node['cluster']['shared_dir_login_nodes']}")
+            .with(command: "bash #{SHARED_DIR_LOGIN_NODES}/scripts/keys-manager.sh --import --folder-path #{SHARED_DIR_LOGIN_NODES}")
         end
       end
     end
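
Taken together, these expectations describe a head-node/login-node handshake: the head node creates the keys and a sync file stamped with the cluster config version, and each login node polls that file until the version matches before importing the keys. Below is a minimal recipe sketch consistent with the specs above; it is an illustration only, not the actual aws-parallelcluster-environment recipe, and details the diff does not show (resource ordering, the cookbook_file mode, the early-return guard) are assumptions.

# Hypothetical sketch of a login_nodes_keys recipe that would satisfy the
# specs above; the real recipe in aws-parallelcluster-cookbook may differ.
return if node['cluster']['scheduler'] == 'awsbatch' || node['cluster']['node_type'] == 'ComputeFleet'

shared_dir = node['cluster']['shared_dir_login_nodes']
sync_file = "#{shared_dir}/.login_nodes_keys_sync_file"
config_version = node['cluster']['cluster_config_version']

case node['cluster']['node_type']
when 'HeadNode'
  directory "#{shared_dir}/scripts" do
    owner 'root'
    group 'root'
    mode '0744'
  end

  cookbook_file "#{shared_dir}/scripts/keys-manager.sh" do
    source 'login_nodes/keys-manager.sh'
    owner 'root'
    group 'root'
    mode '0744' # assumption: the mode sits in a truncated hunk above
  end

  execute 'Initialize Login Nodes keys' do
    command "bash #{shared_dir}/scripts/keys-manager.sh --create --folder-path #{shared_dir}"
  end

  # Publish the config version so login nodes know the keys are ready.
  file sync_file do
    content config_version
    mode '0644'
    owner 'root'
    group 'root'
  end
when 'LoginNode'
  # Poll until the head node has written the expected config version.
  bash "Wait for synchronization file at #{sync_file} to be written for version #{config_version}" do
    code "[[ \"$(cat #{sync_file})\" == \"#{config_version}\" ]] || exit 1"
    retries 30
    retry_delay 10
    timeout 5
  end

  execute 'Import Login Nodes keys' do
    command "bash #{shared_dir}/scripts/keys-manager.sh --import --folder-path #{shared_dir}"
  end
end

The retry budget asserted on the login-node side (30 retries at 10-second intervals, with a 5-second timeout per attempt) gives the head node roughly five minutes to publish the keys before a login node's converge fails.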