diff --git a/topics/admin/tutorials/celery/tutorial.md b/topics/admin/tutorials/celery/tutorial.md
index 4ee0f41f0c5cce..c0b93aa9655d98 100644
--- a/topics/admin/tutorials/celery/tutorial.md
+++ b/topics/admin/tutorials/celery/tutorial.md
@@ -149,7 +149,7 @@ First we need to add our new Ansible Roles to the `requirements.yml`:
> ```diff
> --- a/group_vars/galaxyservers.yml
> +++ b/group_vars/galaxyservers.yml
-> @@ -285,3 +285,7 @@ rabbitmq_users:
+> @@ -286,3 +286,7 @@ rabbitmq_users:
> # TUS
> galaxy_tusd_port: 1080
> galaxy_tus_upload_store: /data/tus
@@ -248,7 +248,7 @@ First we need to add our new Ansible Roles to the `requirements.yml`:
> ```diff
> --- a/group_vars/galaxyservers.yml
> +++ b/group_vars/galaxyservers.yml
-> @@ -272,6 +272,7 @@ rabbitmq_config:
+> @@ -273,6 +273,7 @@ rabbitmq_config:
>
> rabbitmq_vhosts:
> - /pulsar/pulsar_au
> +
>
> rabbitmq_users:
> - user: admin
-> @@ -281,6 +282,13 @@ rabbitmq_users:
+> @@ -282,6 +283,13 @@ rabbitmq_users:
> - user: pulsar_au
> password: "{{ vault_rabbitmq_password_vhost }}"
> vhost: /pulsar/pulsar_au
@@ -299,7 +299,7 @@ First we need to add our new Ansible Roles to the `requirements.yml`:
> ```diff
> --- a/group_vars/galaxyservers.yml
> +++ b/group_vars/galaxyservers.yml
-> @@ -297,3 +297,22 @@ galaxy_tus_upload_store: /data/tus
+> @@ -298,3 +298,22 @@ galaxy_tus_upload_store: /data/tus
> #Redis
> galaxy_additional_venv_packages:
> - redis
diff --git a/topics/admin/tutorials/connect-to-compute-cluster/tutorial.md b/topics/admin/tutorials/connect-to-compute-cluster/tutorial.md
index 3100d0dd343163..942706bac05f75 100644
--- a/topics/admin/tutorials/connect-to-compute-cluster/tutorial.md
+++ b/topics/admin/tutorials/connect-to-compute-cluster/tutorial.md
@@ -151,7 +151,7 @@ be taken into consideration when choosing where to run jobs and what parameters
> ```diff
> --- a/group_vars/galaxyservers.yml
> +++ b/group_vars/galaxyservers.yml
-> @@ -197,6 +197,16 @@ nginx_ssl_role: usegalaxy_eu.certbot
+> @@ -197,6 +197,17 @@ nginx_ssl_role: usegalaxy_eu.certbot
> nginx_conf_ssl_certificate: /etc/ssl/certs/fullchain.pem
> nginx_conf_ssl_certificate_key: /etc/ssl/user/privkey-www-data.pem
>
@@ -160,9 +160,10 @@ be taken into consideration when choosing where to run jobs and what parameters
> +slurm_nodes:
> +- name: localhost # Name of our host
> + CPUs: 2 # Here you would need to figure out how many cores your machine has. For this training we will use 2 but in real life, look at `htop` or similar.
+> + RealMemory: 8192 # Adjust based on available memory. For this training 8192 is sufficient.
> +slurm_config:
> + SlurmdParameters: config_overrides # Ignore errors if the host actually has cores != 2
-> + SelectType: select/cons_res
+> + SelectType: select/cons_tres
> + SelectTypeParameters: CR_CPU_Memory # Allocate individual cores/memory instead of entire node
> +
> # TUS
diff --git a/topics/admin/tutorials/ftp/tutorial.md b/topics/admin/tutorials/ftp/tutorial.md
index ef325cf056e18d..5986485dcd90d6 100644
--- a/topics/admin/tutorials/ftp/tutorial.md
+++ b/topics/admin/tutorials/ftp/tutorial.md
@@ -159,7 +159,7 @@ If the terms "Ansible", "role" and "playbook" mean nothing to you, please checko
> ```diff
> --- a/group_vars/galaxyservers.yml
> +++ b/group_vars/galaxyservers.yml
-> @@ -369,3 +369,24 @@ telegraf_plugins_extra:
+> @@ -370,3 +370,24 @@ telegraf_plugins_extra:
> tiaas_dir: /srv/tiaas
> tiaas_admin_user: admin
> tiaas_admin_pass: changeme
diff --git a/topics/admin/tutorials/job-destinations/tutorial.md b/topics/admin/tutorials/job-destinations/tutorial.md
index 233aa9c677dcf1..6543311db0612c 100644
--- a/topics/admin/tutorials/job-destinations/tutorial.md
+++ b/topics/admin/tutorials/job-destinations/tutorial.md
@@ -361,7 +361,7 @@ We want our tool to run with more than one core. To do this, we need to instruct
> + runner: slurm
> + max_accepted_cores: 16
> + params:
-> + native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores}
+> + native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --mem={round(mem*1024)}
> +
> {% endraw %}
> ```
@@ -373,8 +373,8 @@ We want our tool to run with more than one core. To do this, we need to instruct
>
> Destinations must also be defined in TPV itself. Importantly, note that any destinations defined in the job conf are ignored by TPV. Therefore, we have moved all destinations from the job conf to TPV. In addition, we have removed some
> redundancy by using the "inherits" clause in the `slurm` destination. This means that slurm will inherit all of the settings defined for singularity, but selectively override some settings. We have additionally
-> defined the `native_specification` param for SLURM, which is what SLURM uses to allocate resources per job. Note the use of the `{cores}`
-> parameter within the native specification, which TPV will replace at runtime with the value of cores assigned to the tool.
+> defined the `native_specification` param for SLURM, which is what SLURM uses to allocate resources per job. Note the use of the `{cores}` and `{mem}`
+> parameters within the native specification, which TPV will replace at runtime with the values of cores and memory assigned to the tool.
>
> Finally, we have also defined a new property named `max_accepted_cores`, which is the maximum amount of cores this destination will accept. Since the testing tool requests 2 cores, but only the `slurm`
> destination is able to accept jobs greater than 1 core, TPV will automatically route the job to the best matching destination, in this case, slurm.
@@ -441,7 +441,7 @@ Now that we've configured the resource requirements for a single tool, let's see
> @@ -27,4 +34,3 @@ destinations:
> max_accepted_cores: 16
> params:
-> native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores}
+> native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --mem={round(mem*1024)}
> -
> {% endraw %}
> ```
@@ -513,7 +513,7 @@ on settings that have worked well in the usegalaxy.* federation. The rule file c
> + max_cores: 2
> + max_mem: 8
> params:
-> native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores}
+> native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --mem={round(mem*1024)}
> {% endraw %}
> ```
> {: data-commit="TPV clamp max cores and mem"}
@@ -728,8 +728,8 @@ Lastly, we need to write a rule in TPV that will read the value of the job resou
> max_cores: 2
> max_mem: 8
> params:
-> - native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores}
-> + native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --time={params['walltime']}:00:00
+> - native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --mem={round(mem*1024)}
+> + native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --mem={round(mem*1024)} --time={entity.params['walltime']}:00:00
> {% endraw %}
> ```
> {: data-commit="process resource params in TPV"}
diff --git a/topics/admin/tutorials/monitoring/tutorial.md b/topics/admin/tutorials/monitoring/tutorial.md
index 0a76d3f341ceef..5aad25384fc63a 100644
--- a/topics/admin/tutorials/monitoring/tutorial.md
+++ b/topics/admin/tutorials/monitoring/tutorial.md
@@ -386,7 +386,7 @@ Setting up Telegraf is again very simple. We just add a single role to our playb
> ```diff
> --- a/group_vars/galaxyservers.yml
> +++ b/group_vars/galaxyservers.yml
-> @@ -333,3 +333,12 @@ flower_ui_users:
+> @@ -334,3 +334,12 @@ flower_ui_users:
>
> flower_environment_variables:
> GALAXY_CONFIG_FILE: "{{ galaxy_config_file }}"
@@ -809,7 +809,7 @@ You can run the playbook now, or wait until you have configured Telegraf below:
> ```diff
> --- a/group_vars/galaxyservers.yml
> +++ b/group_vars/galaxyservers.yml
-> @@ -345,3 +345,10 @@ telegraf_plugins_extra:
+> @@ -346,3 +346,10 @@ telegraf_plugins_extra:
> - service_address = ":8125"
> - metric_separator = "."
> - allowed_pending_messages = 10000
diff --git a/topics/admin/tutorials/pulsar/tutorial.md b/topics/admin/tutorials/pulsar/tutorial.md
index 966fc73890f70f..b58d8d8a5f6e77 100644
--- a/topics/admin/tutorials/pulsar/tutorial.md
+++ b/topics/admin/tutorials/pulsar/tutorial.md
@@ -306,8 +306,8 @@ More information about the rabbitmq ansible role can be found [in the repository
> certbot_domains:
> - "{{ inventory_hostname }}"
> certbot_agree_tos: --agree-tos
-> @@ -228,6 +231,47 @@ slurm_config:
-> SelectType: select/cons_res
+> @@ -229,6 +232,47 @@ slurm_config:
+> SelectType: select/cons_tres
> SelectTypeParameters: CR_CPU_Memory # Allocate individual cores/memory instead of entire node
>
> +#Install pip docker package for ansible
@@ -807,7 +807,7 @@ For this tutorial, we will configure Galaxy to run the BWA and BWA-MEM tools on
> @@ -54,3 +54,18 @@ destinations:
> max_mem: 8
> params:
-> native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --time={params['walltime']}:00:00
+> native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --mem={round(mem*1024)} --time={entity.params['walltime']}:00:00
> +
> + pulsar:
> + runner: pulsar_runner
diff --git a/topics/admin/tutorials/tiaas/tutorial.md b/topics/admin/tutorials/tiaas/tutorial.md
index 321ed58e92731f..6e06b87287e286 100644
--- a/topics/admin/tutorials/tiaas/tutorial.md
+++ b/topics/admin/tutorials/tiaas/tutorial.md
@@ -103,7 +103,7 @@ This tutorial will go cover how to set up such a service on your own Galaxy serv
> ```diff
> --- a/group_vars/galaxyservers.yml
> +++ b/group_vars/galaxyservers.yml
-> @@ -352,3 +352,8 @@ telegraf_plugins_extra:
+> @@ -353,3 +353,8 @@ telegraf_plugins_extra:
> - timeout = "10s"
> - data_format = "influx"
> - interval = "15s"
@@ -341,7 +341,7 @@ In order to achieve this, we first need some way to sort the jobs of the trainin
> @@ -62,6 +71,19 @@ destinations:
> max_mem: 8
> params:
-> native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --time={params['walltime']}:00:00
+> native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --mem={round(mem*1024)} --time={entity.params['walltime']}:00:00
> + slurm-training:
> + inherits: singularity
> + runner: slurm
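Editor's note (not part of the patch): the hunks above all push the tutorials toward one consistent SLURM/TPV end state. As a reading aid, the YAML below is a minimal sketch of that end state, assembled only from the added lines in the patch; surrounding keys in `group_vars/galaxyservers.yml` and in the TPV shared-database config are elided, and the combination into a single block is illustrative, not an applyable excerpt.

```yaml
# group_vars/galaxyservers.yml (excerpt): a one-node SLURM "cluster" on localhost.
slurm_nodes:
- name: localhost
  CPUs: 2          # match your machine's core count; see `htop`
  RealMemory: 8192 # node memory in MB, needed once jobs request memory explicitly
slurm_config:
  SlurmdParameters: config_overrides  # tolerate mismatch with the real hardware
  SelectType: select/cons_tres        # cons_res is deprecated in recent SLURM releases
  SelectTypeParameters: CR_CPU_Memory # schedule individual cores/memory, not whole nodes

# TPV destination (excerpt): {cores} and {mem} are substituted by TPV at runtime.
# SLURM's --mem takes megabytes while TPV's mem is in gigabytes, hence round(mem*1024).
destinations:
  slurm:
    inherits: singularity
    runner: slurm
    max_cores: 2
    max_mem: 8
    params:
      native_specification: --nodes=1 --ntasks=1 --cpus-per-task={cores} --mem={round(mem*1024)} --time={entity.params['walltime']}:00:00
```

For example, a tool that TPV assigns `cores: 2` and `mem: 8` is submitted with `--cpus-per-task=2 --mem=8192`. The `--time={entity.params['walltime']}:00:00` clause relies on the `walltime` job-resource parameter introduced in the job-destinations hunks, which is why the patch adds `--time` together with the resource-parameter rule rather than in the earlier destinations.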