Skip to content

Commit 7a21cf4

Browse files
committed
Merge branch 'fix_wiki_gen_exotic' into 'master'
[gen/wiki] Fix wiki links to exotic clusters by simplifying hardware page headings. See merge request grid5000/reference-repository!156
2 parents ddde9b7 + 1ded918 commit 7a21cf4

File tree

2 files changed

+68
-51
lines changed

2 files changed

+68
-51
lines changed

lib/refrepo/gen/wiki/generators/hardware.rb

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -381,9 +381,7 @@ def generate_storage
381381
((!d['reservation'].nil? && d['reservation']) ? '[[Disk_reservation|*]]' : '')
382382
}.join(', ') + ")"
383383
end
384-
queues = cluster_hash['queues'] - ['admin', 'default']
385-
queue_t = (queues.nil? || (queues.empty? ? '' : "_.28" + queues[0].gsub(' ', '_') + ' queue.29'))
386-
nodes_data << { 'uid' => node_uid, 'data' => { 'main' => maindisk_t, 'hdd' => hdd_t, 'ssd' => ssd_t, 'reservation' => reservable_disks, 'queue' => queue_t } }
384+
nodes_data << { 'uid' => node_uid, 'data' => { 'main' => maindisk_t, 'hdd' => hdd_t, 'ssd' => ssd_t, 'reservation' => reservable_disks } }
387385
end
388386
nd = nodes_data.group_by { |d| d['data'] }
389387
nd.each do |data, nodes|
@@ -396,7 +394,7 @@ def generate_storage
396394
end
397395
table_data << [
398396
"[[#{site_uid.capitalize}:Hardware|#{site_uid.capitalize}]]",
399-
"[[#{site_uid.capitalize}:Hardware##{cluster_uid}#{data['queue']}|#{nodesetname}]]",
397+
"[[#{site_uid.capitalize}:Hardware##{cluster_uid}|#{nodesetname}]]",
400398
nodes.length,
401399
data['main'],
402400
data['hdd'],
@@ -445,7 +443,7 @@ def generate_interfaces
445443
network_interfaces.sort.to_h.each { |num, interfaces|
446444
table_data << [
447445
"[[#{site_uid.capitalize}:Network|#{site_uid.capitalize}]]",
448-
"[[#{site_uid.capitalize}:Hardware##{cluster_uid}" + (interfaces['queues'] == '' ? '' : "_.28#{queues.gsub(' ', '_')}.29") + "|#{cluster_uid}" + (network_interfaces.size==1 ? '' : '-' + G5K.nodeset(num)) + "]]",
446+
"[[#{site_uid.capitalize}:Hardware##{cluster_uid}" + "|#{cluster_uid}" + (network_interfaces.size==1 ? '' : '-' + G5K.nodeset(num)) + "]]",
449447
num.count,
450448
interfaces['25g_count'].zero? ? '' : interfaces['25g_count'],
451449
interfaces['10g_count'].zero? ? '' : interfaces['10g_count'],

lib/refrepo/gen/wiki/generators/site_hardware.rb

Lines changed: 65 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -77,14 +77,17 @@ def self.generate_summary_data(site, with_sites)
7777
hardware[site].sort.to_h.each { |cluster_uid, cluster_hash|
7878
cluster_nodes = cluster_hash.keys.flatten.count
7979
queue = cluster_hash.map { |k, v| v['queue']}.first
80-
queue_str = cluster_hash.map { |k, v| v['queue_str']}.first
8180
access_conditions = []
82-
access_conditions << "<b>#{queue}</b>&nbsp;queue" if queue != ''
83-
access_conditions << '<b>exotic</b>&nbsp;job&nbsp;type' if cluster_hash.map { |k, v| v['exotic']}.first
81+
if queue == 'production'
82+
access_conditions << "<b>[[Grid5000:UsagePolicy#Rules_for_the_production_queue|#{queue}]]</b>&nbsp;queue"
83+
elsif queue != ''
84+
access_conditions << "<b>#{queue}</b>&nbsp;queue"
85+
end
86+
access_conditions << '<b>[[Getting_Started#Selecting_specific_resources|exotic]]</b>&nbsp;job&nbsp;type' if cluster_hash.map { |k, v| v['exotic']}.first
8487
table_columns = (with_sites == true ? ['Site'] : []) + ['Cluster', 'Access Condition', 'Date of arrival', { attributes: 'data-sort-type="number"', text: 'Nodes' }, 'CPU', { attributes: 'data-sort-type="number"', text: 'Cores' }, { attributes: 'data-sort-type="number"', text: 'Memory' }, { attributes: 'data-sort-type="number"', text: 'Storage' }, { attributes: 'data-sort-type="number"', text: 'Network' }] + ((site_accelerators.zero? && with_sites == false) ? [] : ['Accelerators'])
8588
data = partition(cluster_hash)
8689
table_data << (with_sites == true ? ["[[#{site.capitalize}:Hardware|#{site.capitalize}]]"] : []) + [
87-
(with_sites == true ? "[[#{site.capitalize}:Hardware##{cluster_uid}" + (queue_str == '' ? '' : "_.28#{queue_str.gsub(' ', '_')}.29") + "|#{cluster_uid}]]" : "[[##{cluster_uid}" + (queue_str == '' ? '' : "_.28#{queue_str.gsub(' ', '_')}.29") + "|#{cluster_uid}]]"),
90+
(with_sites == true ? "[[#{site.capitalize}:Hardware##{cluster_uid}" + "|#{cluster_uid}]]" : "[[##{cluster_uid}" + "|#{cluster_uid}]]"),
8891
access_conditions.join(",<br/>"),
8992
cell_data(data, 'date'),
9093
cluster_nodes,
@@ -109,53 +112,69 @@ def self.generate_description(site)
109112
site_accelerators += cluster_hash.select { |k, v| v['accelerators'] != '' }.count
110113
}
111114

112-
hardware[site].sort.to_h.each { |cluster_uid, cluster_hash|
113-
subclusters = cluster_hash.keys.count != 1
114-
cluster_nodes = cluster_hash.keys.flatten.count
115-
cluster_cpus = cluster_hash.map { |k, v| k.count * v['cpus_per_node'] }.reduce(:+)
116-
cluster_cores = cluster_hash.map { |k, v| k.count * v['cpus_per_node'] * v['cores_per_cpu'] }.reduce(:+)
117-
queue_str = cluster_hash.map { |k, v| v['queue_str']}.first
118-
access_conditions = []
119-
access_conditions << queue_str if queue_str != ''
120-
access_conditions << "exotic job type" if cluster_hash.map { |k, v| v['exotic']}.first
121-
table_columns = ['Cluster', 'Queue', 'Date of arrival', { attributes: 'data-sort-type="number"', text: 'Nodes' }, 'CPU', { attributes: 'data-sort-type="number"', text: 'Cores' }, { attributes: 'data-sort-type="number"', text: 'Memory' }, { attributes: 'data-sort-type="number"', text: 'Storage' }, { attributes: 'data-sort-type="number"', text: 'Network' }] + (site_accelerators.zero? ? [] : ['Accelerators'])
122-
123-
text_data << ["\n== #{cluster_uid}" + (access_conditions.empty? ? '' : " (#{access_conditions.join(", ")})") + " ==\n"]
124-
text_data << ["'''#{cluster_nodes} #{G5K.pluralize(cluster_nodes, 'node')}, #{cluster_cpus} #{G5K.pluralize(cluster_cpus, 'cpu')}, #{cluster_cores} #{G5K.pluralize(cluster_cores, 'core')}" + (subclusters == true ? ",''' split as follows due to differences between nodes " : "''' ") + "([https://public-api.grid5000.fr/stable/sites/#{site}/clusters/#{cluster_uid}/nodes.json?pretty=1 json])"]
125-
126-
cluster_hash.sort.to_h.each_with_index { |(num, h), i|
127-
if subclusters
128-
subcluster_nodes = num.count
129-
subcluster_cpus = subcluster_nodes * h['cpus_per_node']
130-
subcluster_cores = subcluster_nodes * h['cpus_per_node'] * h['cores_per_cpu']
131-
text_data << "<hr style=\"height:10pt; visibility:hidden;\" />\n" if i != 0 # smaller vertical <br />
132-
text_data << ["; #{cluster_uid}-#{G5K.nodeset(num)} (#{subcluster_nodes} #{G5K.pluralize(subcluster_nodes, 'node')}, #{subcluster_cpus} #{G5K.pluralize(subcluster_cpus, 'cpu')}, #{subcluster_cores} #{G5K.pluralize(subcluster_cores, 'core')})"]
133-
end
115+
# Group by queue
116+
# Alphabetic ordering of queue names matches what we want: "default" < "production" < "testing"
117+
hardware[site].group_by { |cluster_uid, cluster_hash| cluster_hash.map { |k, v| v['queue']}.first }.sort.each { |queue, clusters|
118+
queue = (queue.nil? || queue.empty?) ? 'default' : queue
119+
text_data << "\n= Clusters in #{queue} queue ="
120+
clusters.sort.to_h.each { |cluster_uid, cluster_hash|
121+
subclusters = cluster_hash.keys.count != 1
122+
cluster_nodes = cluster_hash.keys.flatten.count
123+
cluster_cpus = cluster_hash.map { |k, v| k.count * v['cpus_per_node'] }.reduce(:+)
124+
cluster_cores = cluster_hash.map { |k, v| k.count * v['cpus_per_node'] * v['cores_per_cpu'] }.reduce(:+)
125+
queue_str = cluster_hash.map { |k, v| v['queue_str']}.first
126+
access_conditions = []
127+
access_conditions << queue_str if queue_str != ''
128+
access_conditions << "exotic job type" if cluster_hash.map { |k, v| v['exotic']}.first
129+
table_columns = ['Cluster', 'Queue', 'Date of arrival', { attributes: 'data-sort-type="number"', text: 'Nodes' }, 'CPU', { attributes: 'data-sort-type="number"', text: 'Cores' }, { attributes: 'data-sort-type="number"', text: 'Memory' }, { attributes: 'data-sort-type="number"', text: 'Storage' }, { attributes: 'data-sort-type="number"', text: 'Network' }] + (site_accelerators.zero? ? [] : ['Accelerators'])
130+
131+
text_data << ["\n== #{cluster_uid} ==\n"]
132+
text_data << ["'''#{cluster_nodes} #{G5K.pluralize(cluster_nodes, 'node')}, #{cluster_cpus} #{G5K.pluralize(cluster_cpus, 'cpu')}, #{cluster_cores} #{G5K.pluralize(cluster_cores, 'core')}" + (subclusters == true ? ",''' split as follows due to differences between nodes " : "''' ") + "([https://public-api.grid5000.fr/stable/sites/#{site}/clusters/#{cluster_uid}/nodes.json?pretty=1 json])"]
133+
134+
reservation_cmd = "\n{{Term|location=f#{site}|cmd="
135+
reservation_cmd += "<code class=\"command\">oarsub</code> "
136+
reservation_cmd += "<code class=\"replace\">-q #{queue}</code> " if queue != 'default'
137+
reservation_cmd += "<code class=\"replace\">-t exotic</code> " if cluster_hash.map { |k, v| v['exotic']}.first
138+
reservation_cmd += "<code class=\"env\">-p \"cluster='#{cluster_uid}'\"</code> "
139+
reservation_cmd += "<code>-I</code>"
140+
reservation_cmd += "}}\n"
141+
text_data << "\n'''Reservation example:'''"
142+
text_data << reservation_cmd
143+
144+
cluster_hash.sort.to_h.each_with_index { |(num, h), i|
145+
if subclusters
146+
subcluster_nodes = num.count
147+
subcluster_cpus = subcluster_nodes * h['cpus_per_node']
148+
subcluster_cores = subcluster_nodes * h['cpus_per_node'] * h['cores_per_cpu']
149+
text_data << "<hr style=\"height:10pt; visibility:hidden;\" />\n" if i != 0 # smaller vertical <br />
150+
text_data << ["; #{cluster_uid}-#{G5K.nodeset(num)} (#{subcluster_nodes} #{G5K.pluralize(subcluster_nodes, 'node')}, #{subcluster_cpus} #{G5K.pluralize(subcluster_cpus, 'cpu')}, #{subcluster_cores} #{G5K.pluralize(subcluster_cores, 'core')})"]
151+
end
134152

135-
accelerators = nil
136-
if h['gpu_str'] != '' && h['mic_str'] != ''
137-
accelerators = 'GPU/Xeon Phi'
138-
elsif h['gpu_str'] != ''
139-
accelerators = 'GPU'
140-
elsif h['mic_str'] != ''
141-
accelerators = 'Xeon Phi'
142-
end
143-
hash = {
144-
'Model' => h['model'],
145-
'Date of arrival' => h['date'],
146-
'CPU' => h['processor_description'],
147-
'Memory' => h['ram_size'] + (!h['pmem_size'].nil? ? " + #{h['pmem_size']} [[PMEM]]" : ''),
148-
'Storage' => h['storage_description'],
149-
'Network' => h['network_description'],
153+
accelerators = nil
154+
if h['gpu_str'] != '' && h['mic_str'] != ''
155+
accelerators = 'GPU/Xeon Phi'
156+
elsif h['gpu_str'] != ''
157+
accelerators = 'GPU'
158+
elsif h['mic_str'] != ''
159+
accelerators = 'Xeon Phi'
160+
end
161+
hash = {}
162+
hash['Access condition'] = access_conditions.join(", ") if not access_conditions.empty?
163+
hash.merge!({
164+
'Model' => h['model'],
165+
'Date of arrival' => h['date'],
166+
'CPU' => h['processor_description'],
167+
'Memory' => h['ram_size'] + (!h['pmem_size'].nil? ? " + #{h['pmem_size']} [[PMEM]]" : ''),
168+
'Storage' => h['storage_description'],
169+
'Network' => h['network_description'],
170+
})
171+
hash[accelerators] = h['accelerators_long'] if accelerators
172+
text_data << MW::generate_hash_table(hash)
150173
}
151-
hash[accelerators] = h['accelerators_long'] if accelerators
152-
text_data << MW::generate_hash_table(hash)
153174
}
154175
}
155176

156-
generated_content = "\n= Cluster details =\n"
157-
generated_content += text_data.join("\n")
158-
return generated_content
177+
return text_data.join("\n")
159178
end
160179
end
161180

0 commit comments

Comments
 (0)