From ad6b3920bd230467f0e2921f8ebace571a46c310 Mon Sep 17 00:00:00 2001 From: Alex Domingo Date: Mon, 20 Feb 2023 12:15:25 +0100 Subject: [PATCH 1/2] update configuration of JupyterHub to rootless and userless container with tight SSH connection --- .../container/.config/jupyterhub_config.py | 317 ++++++++++++++++++ .../container/.config/templates/login.html | 58 ++++ .../container/.config/templates/page.html | 6 + jupyterhub/container/.ssh/config | 29 ++ jupyterhub/container/Dockerfile | 67 ++-- .../etc/jupyterhub/jupyterhub_config.py | 140 -------- jupyterhub/etc/subgid | 1 - jupyterhub/etc/subuid | 1 - .../etc/systemd/system/jupyterhub.service | 5 +- .../usr/local/bin/jupyterhub-init.sh | 30 +- .../etc/ssh/sshd_config} | 22 +- jupyterhub/slurm_login/etc/sudoers | 34 ++ 12 files changed, 501 insertions(+), 209 deletions(-) create mode 100644 jupyterhub/container/.config/jupyterhub_config.py create mode 100644 jupyterhub/container/.config/templates/login.html create mode 100644 jupyterhub/container/.config/templates/page.html create mode 100644 jupyterhub/container/.ssh/config delete mode 100644 jupyterhub/etc/jupyterhub/jupyterhub_config.py delete mode 100644 jupyterhub/etc/subgid delete mode 100644 jupyterhub/etc/subuid rename jupyterhub/{ => host}/etc/systemd/system/jupyterhub.service (91%) rename jupyterhub/{ => host}/usr/local/bin/jupyterhub-init.sh (77%) rename jupyterhub/{container/sudoers.conf => slurm_login/etc/ssh/sshd_config} (62%) create mode 100644 jupyterhub/slurm_login/etc/sudoers diff --git a/jupyterhub/container/.config/jupyterhub_config.py b/jupyterhub/container/.config/jupyterhub_config.py new file mode 100644 index 0000000..4aa6ca8 --- /dev/null +++ b/jupyterhub/container/.config/jupyterhub_config.py @@ -0,0 +1,317 @@ +# Copyright 2023 Vrije Universiteit Brussel +# +# This file is part of notebook-platform, +# originally created by the HPC team of Vrij Universiteit Brussel (http://hpc.vub.be), +# with support of Vrije Universiteit Brussel (http://www.vub.be), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# the Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/vub-hpc/notebook-platform +# +# notebook-platform is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License v3 as published by +# the Free Software Foundation. +# +# notebook-platform is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +#------------------------------------------------------------------------------ +# Network configuration +#------------------------------------------------------------------------------ +# Listen on all interfaces +# proxy is in localhost, users are external and spawners are internal +c.JupyterHub.bind_url = 'https://0.0.0.0:8000' +c.JupyterHub.hub_ip = '0.0.0.0' +# IP address or hostname that spawners should use to connect to the Hub API +c.JupyterHub.hub_connect_ip = 'jupyterhub.internal.domain' + +#------------------------------------------------------------------------------ +# OAuthenticator configuration +# - use GenericOAuthenticator with the VSC account page +# - work without local VSC users in the JupyterHub container +# - enable SSL +#------------------------------------------------------------------------------ +from oauthenticator.generic import GenericOAuthenticator +c.JupyterHub.authenticator_class = GenericOAuthenticator + +# Oauth application secrets in the VSC account page +c.GenericOAuthenticator.login_service = 'VSC Account' +c.GenericOAuthenticator.client_id = 'SECRET' +c.GenericOAuthenticator.client_secret = 'SECRET' +c.GenericOAuthenticator.oauth_callback_url = 'https://notebooks.hpc.vub.be/hub/oauth_callback' +c.GenericOAuthenticator.scope = ['read'] + +# SSL certificates +c.JupyterHub.ssl_cert = '/home/jupyterhub/.ssl/jupyterhub.crt' +c.JupyterHub.ssl_key = '/home/jupyterhub/.ssl/jupyterhub.key' + +#------------------------------------------------------------------------------ +# Custom notebook spawner for VSC users +# - determine UID and home directory from VSC config +# - works without local VSC users +#------------------------------------------------------------------------------ +from jupyterhub_moss import MOSlurmSpawner, set_config +from traitlets import default +from vsc.config.base import DATA_KEY, HOME_KEY, VSC + +class VSCSlurmSpawner(MOSlurmSpawner): + """ + Spawner that derives user environment from vsc-config to not rely on local users + """ + vsc = VSC() + + def vsc_user_institute(self): + "return institute of VSC user" + vsc_uid = self.vsc.user_uid_institute_map[self.user.name[:3]][0] + int(self.user.name[3:]) + return self.vsc.user_id_to_institute(vsc_uid) + + @default("req_homedir") + def vsc_homedir(self): + "set default home directory to VSC_HOME" + vsc_user_paths = self.vsc.user_pathnames(self.user.name, self.vsc_user_institute()) + return vsc_user_paths[HOME_KEY] + + @default("notebook_dir") + def vsc_datadir(self): + "set default notebook root directory to VSC_DATA" + vsc_user_paths = self.vsc.user_pathnames(self.user.name, self.vsc_user_institute()) + return vsc_user_paths[DATA_KEY] + + def user_env(self, env): + """get VSC user environment""" + env["USER"] = self.user.name + env["SHELL"] = "/bin/bash" + env["HOME"] = self.req_homedir + env["JUPYTERHUB_ROOT_DIR"] = self.notebook_dir + return env + +#------------------------------------------------------------------------------ +# BatchSpawner configuration +# - use VSCSlurmSpawner +# - submit notebook job to Slurm by connecting with SSH to a login node +# - SSH connection stablished as JupyterHub operator +# - define job script parameters and commands launching the notebook +#------------------------------------------------------------------------------ +set_config(c) +c.JupyterHub.spawner_class = VSCSlurmSpawner +c.Spawner.start_timeout = 600 # seconds from job submit to job start +c.Spawner.http_timeout = 120 # seconds from job start to reachable single-user server + +# JupyterLab 
Environments in VUB +vub_lab_environments = { + "2022_default": { + # Text displayed for this environment select option + "description": "2022a: Python v3.10.4 + kernels (default)", + # Space separated list of modules to be loaded + "modules": "JupyterHub/2.3.1-GCCcore-11.3.0", + # Path to Python environment bin/ used to start jupyter on the Slurm nodes + "path": "", + # Toggle adding the environment to shell PATH (default: True) + "add_to_path": False, + }, + "2022_rstudio": { + "description": "2022a: Python v3.10.4 + RStudio", + "modules": ( + "JupyterHub/2.3.1-GCCcore-11.3.0 " + "jupyter-rsession-proxy/2.1.0-GCCcore-11.3.0 " + "RStudio-Server/2022.07.2+576-foss-2022a-Java-11-R-4.2.1 " + "IRkernel/1.3.2-foss-2022a-R-4.2.1 " + ), + "path": "", + "add_to_path": False, + }, + "2022_matlab": { + "description": "2022a: Python v3.10.4 + MATLAB", + "modules": ( + "MATLAB/2022a-r5 " + "JupyterHub/2.3.1-GCCcore-11.3.0 " + "jupyter-matlab-proxy/0.5.0-GCCcore-11.3.0 " + ), + "path": "", + "add_to_path": False, + }, + "2022_dask": { + "description": "2022a: Python v3.10.4 + dask", + "modules": ( + "JupyterHub/2.3.1-GCCcore-11.3.0 " + "dask-labextension/6.0.0-foss-2022a " + ), + "path": "", + "add_to_path": False, + }, + "2022_nglview": { + "description": "2022a: Python v3.10.4 + nglview", + "modules": ( + "JupyterHub/2.3.1-GCCcore-11.3.0 " + "nglview/3.0.3-foss-2022a " + ), + "path": "", + "add_to_path": False, + }, + "2021_default": { + "description": "2021a: Python v3.9.5 + kernels (default)", + "modules": "JupyterHub/2.3.1-GCCcore-10.3.0", + "path": "", + "add_to_path": False, + }, + "2021_rstudio": { + "description": "2021a: Python v3.9.5 + RStudio", + "modules": ( + "JupyterHub/2.3.1-GCCcore-10.3.0 " + "jupyter-rsession-proxy/2.1.0-GCCcore-10.3.0 " + "RStudio-Server/1.4.1717-foss-2021a-Java-11-R-4.1.0 " + "IRkernel/1.2-foss-2021a-R-4.1.0 " + ), + "path": "", + "add_to_path": False, + }, + "2021_matlab": { + "description": "2021a: Python v3.9.5 + MATLAB", + "modules": ( + "MATLAB/2021a " + "JupyterHub/2.3.1-GCCcore-10.3.0 " + "jupyter-matlab-proxy/0.3.4-GCCcore-10.3.0 " + "MATLAB-Kernel/0.17.1-GCCcore-10.3.0 " + ), + "path": "", + "add_to_path": False, + }, + "2021_dask": { + "description": "2021a: Python v3.9.5 + dask", + "modules": ( + "JupyterHub/2.3.1-GCCcore-10.3.0 " + "dask-labextension/5.3.1-foss-2021a " + ), + "path": "", + "add_to_path": False, + }, + "2021_nglview": { + "description": "2021a: Python v3.9.5 + nglview", + "modules": ( + "JupyterHub/2.3.1-GCCcore-10.3.0 " + "nglview/3.0.3-foss-2021a " + ), + "path": "", + "add_to_path": False, + }, +} + +# Partition descriptions +vub_partitions_hydra = { + "broadwell": { # Partition name + "architecture": "x86_86", # Nodes architecture + "description": "Intel Broadwell", # Displayed description + "max_runtime": 12*3600, # Maximum time limit in seconds (Must be at least 1hour) + "simple": True, # True to show in Simple tab + "jupyter_environments": vub_lab_environments, + }, + "skylake": { + "architecture": "x86_86", + "description": "Intel Skylake", + "max_runtime": 12*3600, + "simple": True, + "jupyter_environments": vub_lab_environments, + }, + "pascal_gpu": { + "architecture": "CUDA", + "description": "Nvidia Pascal P100", + "max_runtime": 6*3600, + "simple": True, + "jupyter_environments": vub_lab_environments, + }, + "skylake_mpi": { + "architecture": "x86_86", + "description": "Intel Skylake with InfiniBand", + "max_runtime": 6*3600, + "simple": False, + "jupyter_environments": vub_lab_environments, + }, +} + 
+vub_partitions_manticore = { + "ivybridge": { + "architecture": "x86_86", + "description": "Intel Ivybridge", + "max_runtime": 8*3600, + "simple": True, + "jupyter_environments": vub_lab_environments, + }, + "ampere_gpu": { + "architecture": "CUDA", + "description": "Nvidia Ampere", + "max_runtime": 8*3600, + "simple": True, + "jupyter_environments": vub_lab_environments, + }, + "skylake_mpi": { + "architecture": "x86_86", + "description": "Intel Skylake with InfiniBand", + "max_runtime": 4*3600, + "simple": False, + "jupyter_environments": vub_lab_environments, + }, +} + +c.MOSlurmSpawner.partitions = vub_partitions_hydra + +# Single-user serve job loads its own JupyterHub with batchspawner (for comms) +# plus either JupyterLab or JupyterNotebook +# Job environment is reset to an aseptic state avoiding user's customizations +c.BatchSpawnerBase.req_prologue = """ +function serialize_env(){ + # Pick all environment variables matching each given pattern + # output their definitions ready to be exported to the environment + for var_pattern in $@; do + var_pattern="^${var_pattern}=" + while read envar; do + # Protect contents of variables with printf %q because this job + # script is sent as standard input to sbatch through ssh and sudo + envar_name=${envar/=*} + printf "export %q=%q\n" "${envar_name}" "${!envar_name}" + done < <(env | grep "$var_pattern" ) + done +} + +# Launch notebook in aseptic environment +# note: the initial shell of the job script will evaluate the whole `exec env -i bash` +# command before its execution. This means that any variable ${} or command substitution $() +# in the input will be carried out before entering the minimal environment of `env -i`. +exec env -i bash --norc --noprofile < + .container #login-main { + width: 100%; + height: auto; + margin-top: 8ex; + margin-bottom: 8ex; + } + +
+
+
+

Notebook Platform of VUB-HPC

+

+ Welcome to the notebook platform of VUB-HPC. + This portal can be used by any VSC user + to manage and launch Jupyter notebooks + directly on the Tier-2 HPC cluster of VUB (Hydra). + Once you log in with your VSC account, you will be able to + select the computational + resources of your notebook session and start a + JupyterLab environment. +

+

+ Multiple Jupyter environments + are available using different versions of Python, different software + module generations or different lab extensions. + All JupyterLab environments in this platform are integrated with the + software module system + in our HPC cluster. This means that you can load and use in your notebooks the + same software packages used in your computational jobs. +

+

+ You will also find multiple kernels available for your notebooks, such as
+ Python,
+ R,
+ Julia or
+ MATLAB,
+ as well as the option to start other environments from your web
+ browser, such as RStudio or
+ MATLAB Desktop.

+ {{ super() }} +
+
+

+ + VUB-HPC Logo + +

+

+ + VSC Logo + +

+
+
+{% endblock %} diff --git a/jupyterhub/container/.config/templates/page.html b/jupyterhub/container/.config/templates/page.html new file mode 100644 index 0000000..021a7ce --- /dev/null +++ b/jupyterhub/container/.config/templates/page.html @@ -0,0 +1,6 @@ +{% extends "templates/page.html" %} {% block nav_bar_right_items %} +
  • + Documentation +
  • +{{ super() }} +{% endblock %} diff --git a/jupyterhub/container/.ssh/config b/jupyterhub/container/.ssh/config new file mode 100644 index 0000000..0def1c9 --- /dev/null +++ b/jupyterhub/container/.ssh/config @@ -0,0 +1,29 @@ +# Copyright 2023 Vrije Universiteit Brussel +# +# This file is part of notebook-platform, +# originally created by the HPC team of Vrij Universiteit Brussel (http://hpc.vub.be), +# with support of Vrije Universiteit Brussel (http://www.vub.be), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# the Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/vub-hpc/notebook-platform +# +# notebook-platform is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License v3 as published by +# the Free Software Foundation. +# +# notebook-platform is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# +### +# pass JupyterHub environment through the SSH connection to login node +Host login* + ForwardX11Trusted no + GSSAPIAuthentication no + SendEnv JUPYTERHUB_API_TOKEN JPY_API_TOKEN JUPYTERHUB_CLIENT_ID JUPYTERHUB_HOST JUPYTERHUB_API_URL JUPYTERHUB_OAUTH_CALLBACK_URL JUPYTERHUB_OAUTH_SCOPES JUPYTERHUB_USER JUPYTERHUB_SERVER_NAME JUPYTERHUB_ACTIVITY_URL JUPYTERHUB_BASE_URL JUPYTERHUB_SERVICE_PREFIX JUPYTERHUB_SERVICE_URL JUPYTERHUB_ROOT_DIR JUPYTERHUB_DEFAULT_URL +# indentity of login nodes is not known in advance + StrictHostKeyChecking no diff --git a/jupyterhub/container/Dockerfile b/jupyterhub/container/Dockerfile index 065d131..c3438c0 100644 --- a/jupyterhub/container/Dockerfile +++ b/jupyterhub/container/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2022 Vrije Universiteit Brussel +# Copyright 2023 Vrije Universiteit Brussel # # This file is part of notebook-platform, # originally created by the HPC team of Vrij Universiteit Brussel (http://hpc.vub.be), @@ -20,56 +20,57 @@ # ### # -# JupyterHub 2.0 + OAuthenticator + BatchSpawner +# JupyterHub 2.3 + Oauthenticator + batchspawner # based on https://github.com/jupyterhub/oauthenticator/blob/main/examples/full/Dockerfile -# meant to be run rootless +# JupyterHub run as non-root user -FROM jupyterhub/jupyterhub:2.0 +FROM jupyterhub/jupyterhub:2.3 MAINTAINER VUB-HPC -### System tools +# --- System tools --- RUN apt update # install SSH client for BatchSpawner RUN apt install -y --no-install-recommends openssh-client -# install sudo for BatchSpawner -RUN apt install -y sudo # make /bin/sh symlink to bash instead of dash RUN echo "dash dash/sh boolean false" | debconf-set-selections RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash -### Operator non-root user and extra groups -COPY sudoers.conf /etc/sudoers -RUN groupadd --gid 2000 etcconf # special access to config files in bind folders -RUN groupadd --gid 2001 clients # can be impersonated with sudo -RUN useradd --shell /bin/bash --create-home --groups etcconf jupyterhub -# switch to non-root operator -USER jupyterhub -WORKDIR /home/jupyterhub +# --- Jupyter Hub extras --- +# the following steps need to open many files +# set DefaultLimitNOFILE=65536 in systemd -### Jupyter Hub extras -ENV PATH "/home/jupyterhub/.local/bin:$PATH" +RUN python3 -m pip install --upgrade pip +# install Oauthenticator +RUN 
python3 -m pip install oauthenticator +# install BatchSpawner and Modular Slurm Spawner (vub-hpc fork) +RUN python3 -m pip install https://github.com/vub-hpc/batchspawner/archive/refs/tags/v1.2.1.tar.gz +RUN python3 -m pip install https://github.com/vub-hpc/jupyterhub_moss/archive/refs/tags/v5.5.2.tar.gz +# install vsc-config +RUN python3 -m pip install vsc-base +COPY vsc-config-master.tar.gz /usr/local/src/ +RUN python3 -m pip install /usr/local/src/vsc-config-master.tar.gz +# install static resources for theming +COPY vub-hpc-logo-horiz-color.png /usr/local/share/jupyterhub/static/images/ +COPY vub-hpc-logo-square-color.png /usr/local/share/jupyterhub/static/images/ +COPY vsc-logo.png /usr/local/share/jupyterhub/static/images/ + +# --- JupyterHub operator: non-root user --- +# create user with same UID as outside of container +ARG operator_name +ARG operator_uid +RUN useradd --shell /bin/bash --create-home --uid $operator_uid $operator_name +# switch to operator +USER $operator_name +WORKDIR /home/$operator_name -# install oauthenticator -RUN python3 -m pip install --user oauthenticator +# --- Configuration --- +# oauthenticator RUN mkdir oauthenticator RUN chmod 700 oauthenticator - ENV OAUTHENTICATOR_DIR "/home/jupyterhub/oauthenticator" +# use Oauth of VSC account page ENV OAUTH2_AUTHORIZE_URL "https://account.vscentrum.be/django/oauth/authorize/" ENV OAUTH2_TOKEN_URL "https://account.vscentrum.be/django/oauth/token/" ENV OAUTH2_USERDATA_URL "https://account.vscentrum.be/django/oauth/current_vsc_user/" ENV OAUTH2_USERNAME_KEY "id" - -# install BatchSpawner -RUN python3 -m pip install --user batchspawner - -# install vsc-config (local sources) -RUN python3 -m pip install --user vsc-base -COPY vsc-config-master.tar.gz /usr/local/src/ -RUN python3 -m pip install --user /usr/local/src/vsc-config-master.tar.gz - -# adduser wrapper -RUN echo -e '#!/bin/sh\nsudo useradd $@' > .local/bin/useradd -RUN chmod 750 .local/bin/useradd - diff --git a/jupyterhub/etc/jupyterhub/jupyterhub_config.py b/jupyterhub/etc/jupyterhub/jupyterhub_config.py deleted file mode 100644 index 5fe947b..0000000 --- a/jupyterhub/etc/jupyterhub/jupyterhub_config.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2022 Vrije Universiteit Brussel -# -# This file is part of notebook-platform, -# originally created by the HPC team of Vrij Universiteit Brussel (http://hpc.vub.be), -# with support of Vrije Universiteit Brussel (http://www.vub.be), -# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), -# the Flemish Research Foundation (FWO) (http://www.fwo.be/en) -# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). -# -# https://github.com/vub-hpc/notebook-platform -# -# notebook-platform is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License v3 as published by -# the Free Software Foundation. -# -# notebook-platform is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -### -# -# Configuration file for JupyterHub integrated into VUB-HPC clusters -# - Authentication with VSC account page (OAuthenticator) -# - Notebook spawend in remote Slurm cluster (BatchSpawner) -# -#------------------------------------------------------------------------------ -# Network configuration -#------------------------------------------------------------------------------ -# Listen on all interfaces -# proxy is in localhost, users are external and spawners are internal -c.JupyterHub.bind_url = 'https://0.0.0.0:8000' -c.JupyterHub.hub_ip = '0.0.0.0' -# IP address or hostname that spawners should use to connect to the Hub API -c.JupyterHub.hub_connect_ip = 'host.internal.domain' - -#------------------------------------------------------------------------------ -# VSC user creation in the system -# - create local VSC users on-the-fly with specicic UID and home directories -#------------------------------------------------------------------------------ -from vsc.config.base import HOME_KEY, VSC - -def vsc_user_uid_home(username): - """Create a new local VSC user on the system.""" - vsc = VSC() - vsc_uid = vsc.user_uid_institute_map[username[:3]][0] + int(username[3:]) - vsc_user_institute = vsc.user_id_to_institute(vsc_uid) - vsc_user_paths = vsc.user_pathnames(username, vsc_user_institute) - vsc_home = vsc_user_paths[HOME_KEY] - - # UID in container corresponds to VSC number - pod_id = username[3:] - - # define command to add VSC user to system - add_user_cmd = ['useradd', '--shell', '/bin/bash'] - # add VSC user details to adduser cmd - add_user_cmd += ['--uid', pod_id, '--user-group'] - add_user_cmd += ['--no-create-home', '--home-dir', vsc_home] - # add VSC user to JupyterHub client group (controls sudo permissions) - add_user_cmd += ['--groups', 'clients'] - - return add_user_cmd - -#------------------------------------------------------------------------------ -# OAuthenticator configuration -# - use GenericOAuthenticator with the VSC account page -# - create local VSC users on-the-fly in the JupyterHub container -# - enable SSL -#------------------------------------------------------------------------------ -# VSC users have to be created with specific UID and home directories -from oauthenticator.generic import LocalGenericOAuthenticator - -class VSCGenericOAuthenticator(LocalGenericOAuthenticator): - def add_system_user(self, user): - """Inject VSC user ID and home directory into adduser command""" - self.add_user_cmd = vsc_user_uid_home(user.name) - super(VSCGenericOAuthenticator, self).add_system_user(user) - -# enable Oauth and automatically create logged users in the system -c.JupyterHub.authenticator_class = VSCGenericOAuthenticator -c.LocalAuthenticator.create_system_users = True - -# Oauth application secrets in the VSC account page -c.GenericOAuthenticator.login_service = 'VSC Account' -c.GenericOAuthenticator.client_id = 'SECRET' -c.GenericOAuthenticator.client_secret = 'SECRET' -c.GenericOAuthenticator.oauth_callback_url = 'https://host.exernal.domain/hub/oauth_callback' -c.GenericOAuthenticator.scope = ['read'] - -# SSL certificates -c.JupyterHub.ssl_cert = '/etc/pki/tls/certs/host-ssl.crt' -c.JupyterHub.ssl_key = '/etc/pki/tls/private/host-ssl.key' - -#------------------------------------------------------------------------------ -# BatchSpawner configuration -# - use Slurm by connecting with SSH to a login node -# - define job script launching the notebook -#------------------------------------------------------------------------------ -import batchspawner 
-c.JupyterHub.spawner_class = 'batchspawner.SlurmSpawner' -c.Spawner.http_timeout = 120 - -# default profile settings -c.BatchSpawnerBase.req_partition = 'ivybridge,skylake_mpi' -c.BatchSpawnerBase.req_runtime = '2:00:00' -c.BatchSpawnerBase.req_memory = '2G' -c.BatchSpawnerBase.req_nprocs = '1' - -# job script needs to load batchspawner and JupyterLab or JupyterNotebook -c.BatchSpawnerBase.req_prologue = """ -module load JupyterHub/2.0.2-GCCcore-10.3.0 -""" - -# execute all Slurm commands through SSH on the login node -c.SlurmSpawner.exec_prefix = "sudo -E -u {username} ssh " -# auto-accept keys from login node -c.SlurmSpawner.exec_prefix += "-o \'StrictHostKeyChecking no\' " -# login node hostname -c.SlurmSpawner.exec_prefix += "login.internal.domain " - -# protect argument quoting in query command send through SSH -c.SlurmSpawner.batch_query_cmd = r"squeue -h -j {job_id} -o \'%T %B\'" - -# pass the JupyterHub environment to sbatch through the SSH connnection -jh_env = ['JUPYTERHUB_API_TOKEN', 'JPY_API_TOKEN', 'JUPYTERHUB_CLIENT_ID', 'JUPYTERHUB_HOST', 'JUPYTERHUB_API_URL', - 'JUPYTERHUB_OAUTH_CALLBACK_URL', 'JUPYTERHUB_OAUTH_SCOPES', 'JUPYTERHUB_USER', 'JUPYTERHUB_SERVER_NAME', - 'JUPYTERHUB_ACTIVITY_URL', 'JUPYTERHUB_BASE_URL', 'JUPYTERHUB_SERVICE_PREFIX', 'JUPYTERHUB_SERVICE_URL'] -# protect expansion of envars to be reused as input '${VAR@Q}' -jh_env = ' '.join([f'{var}=\"${{{var}@Q}}\"' for var in jh_env]) -# protect envars from templating -jh_env = "{% raw %}env " + jh_env + "{% endraw %}" -# prepend the envars to the default sbatch command -c.SlurmSpawner.batch_submit_cmd = jh_env + " sbatch --parsable" - -# the user server is launched with srun, ensure that it gets the job environment -c.SlurmSpawner.req_srun = 'srun --export=ALL' - -# expand the execution hostname returned by sstat to a FQDN -c.SlurmSpawner.state_exechost_exp = r'\1.domain' - diff --git a/jupyterhub/etc/subgid b/jupyterhub/etc/subgid deleted file mode 100644 index dd4355f..0000000 --- a/jupyterhub/etc/subgid +++ /dev/null @@ -1 +0,0 @@ -jupyterhub:2500001:65536 diff --git a/jupyterhub/etc/subuid b/jupyterhub/etc/subuid deleted file mode 100644 index dd4355f..0000000 --- a/jupyterhub/etc/subuid +++ /dev/null @@ -1 +0,0 @@ -jupyterhub:2500001:65536 diff --git a/jupyterhub/etc/systemd/system/jupyterhub.service b/jupyterhub/host/etc/systemd/system/jupyterhub.service similarity index 91% rename from jupyterhub/etc/systemd/system/jupyterhub.service rename to jupyterhub/host/etc/systemd/system/jupyterhub.service index 7a31f43..0661421 100644 --- a/jupyterhub/etc/systemd/system/jupyterhub.service +++ b/jupyterhub/host/etc/systemd/system/jupyterhub.service @@ -1,4 +1,4 @@ -# Copyright 2022 Vrije Universiteit Brussel +# Copyright 2023 Vrije Universiteit Brussel # # This file is part of notebook-platform, # originally created by the HPC team of Vrij Universiteit Brussel (http://hpc.vub.be), @@ -27,12 +27,13 @@ [Unit] After=network-online.target Description=Jupyter Hub container with server and proxy +RequiresMountsFor=/var/run/container/storage Wants=network-online.target [Service] # start Podman container through control script "jupyterhub-init.sh" Environment="PODMAN_SYSTEMD_UNIT=%n" -Environment="JH_CONTAINER_RUNDIR=%t/jupyterhub-rootless" +Environment="JH_CONTAINER_RUNDIR=%t/jupyterhub-rootless" ExecStart=/usr/local/bin/jupyterhub-init.sh "%n" # Podman will fork and handle the killing on its own ExecStop=/usr/bin/podman stop -t 10 "%n" diff --git a/jupyterhub/usr/local/bin/jupyterhub-init.sh 
b/jupyterhub/host/usr/local/bin/jupyterhub-init.sh similarity index 77% rename from jupyterhub/usr/local/bin/jupyterhub-init.sh rename to jupyterhub/host/usr/local/bin/jupyterhub-init.sh index e83354f..ac53322 100755 --- a/jupyterhub/usr/local/bin/jupyterhub-init.sh +++ b/jupyterhub/host/usr/local/bin/jupyterhub-init.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2022 Vrije Universiteit Brussel +# Copyright 2023 Vrije Universiteit Brussel # # This file is part of notebook-platform, # originally created by the HPC team of Vrij Universiteit Brussel (http://hpc.vub.be), @@ -37,8 +37,8 @@ # JH_CONTAINER_NAME=${1:-jupyterhub} -JH_CONTAINER_TAG="2.0" -JH_CONTAINER_SRC="localhost/jupyterhub-rootless:$JH_CONTAINER_TAG" +JH_CONTAINER_TAG="2.3" +JH_CONTAINER_SRC="localhost/jupyterhub-moss:$JH_CONTAINER_TAG" # Custom runtime directory pid_file="" @@ -51,16 +51,18 @@ if [ -n "$JH_CONTAINER_RUNDIR" ]; then fi # Container port -# Forward container port 8000 (web) and port 8081 (api) +# and forwards port 80 to 8000 (web) and port 8081 to 8081 (api) podman_port_web="-p 8000:8000/tcp" podman_port_api="-p 8081:8081/tcp" podman_port="$podman_port_web $podman_port_api" -# VSC user home directories -vsc_institutes="brussel" -vsc_home_mounts="" -for institute in $vsc_institutes; do - vsc_home_mounts="$vsc_home_mounts -v /user/$institute:/user/$institute" +# Bind mounts of JupyterHub operator +bind_folders=".ssh .ssl .config" +podman_homebinds="" +for folder in $bind_folders; do + if [ -d "$HOME/$folder" ]; then + podman_homebinds="$podman_homebinds -v $HOME/$folder:$HOME/$folder" + fi done # Run JupyterHub container @@ -77,16 +79,14 @@ else -d # run in detached mode $podman_pid $podman_cid # define runtime files $podman_port # define container network settings - $vsc_home_mounts # bind mount VSC_HOME directories --log-driver=journald # use journald to manage container logs -v /dev/log:/dev/log # bind mount /dev/log to make container logs available in host - -v /etc/pki:/etc/pki # bind directory with certificates - -v /etc/jupyterhub:/etc/jupyterhub # bind directory with configuration files - --group-add=etcconf # add secondary groups from host system + $podman_homebinds # bind mount of directories in operator home + --userns=keep-id # keep UID of operator inside the container --name=$JH_CONTAINER_NAME # define container name $JH_CONTAINER_SRC # source of container image - jupyterhub # COMMAND # - -f /etc/jupyterhub/jupyterhub_config.py # configuration file + jupyterhub # COMMAND # + -f ~/.config/jupyterhub_config.py # configuration file ) podman_cmd="run ${podman_run_args[@]}" fi diff --git a/jupyterhub/container/sudoers.conf b/jupyterhub/slurm_login/etc/ssh/sshd_config similarity index 62% rename from jupyterhub/container/sudoers.conf rename to jupyterhub/slurm_login/etc/ssh/sshd_config index 97b189e..eb3ebe7 100644 --- a/jupyterhub/container/sudoers.conf +++ b/jupyterhub/slurm_login/etc/ssh/sshd_config @@ -1,4 +1,4 @@ -# Copyright 2022 Vrije Universiteit Brussel +# Copyright 2023 Vrije Universiteit Brussel # # This file is part of notebook-platform, # originally created by the HPC team of Vrij Universiteit Brussel (http://hpc.vub.be), @@ -18,20 +18,8 @@ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # -### -# -# This file MUST be edited with the 'visudo' command as root. # -# See the man page for details on how to write a sudoers file. 
-# -Defaults env_reset -Defaults mail_badpass -Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:" - -# User privilege specification -root ALL=(ALL:ALL) ALL - -# Allow JupyterHub operator to add new users -jupyterhub ALL=(root) NOPASSWD:/usr/sbin/useradd -# Allow JupyterHub operator to SSH as other VSC users -jupyterhub ALL=(%clients) NOPASSWD:SETENV:/usr/bin/ssh +### +# accept JupyterHub environment sent by the hub through the SSH connection +Match Address jupyterhub.internal.domain User jupyterhub + AcceptEnv JUPYTERHUB_API_TOKEN JPY_API_TOKEN JUPYTERHUB_CLIENT_ID JUPYTERHUB_HOST JUPYTERHUB_API_URL JUPYTERHUB_OAUTH_CALLBACK_URL JUPYTERHUB_OAUTH_SCOPES JUPYTERHUB_USER JUPYTERHUB_SERVER_NAME JUPYTERHUB_ACTIVITY_URL JUPYTERHUB_BASE_URL JUPYTERHUB_SERVICE_PREFIX JUPYTERHUB_SERVICE_URL JUPYTERHUB_ROOT_DIR JUPYTERHUB_DEFAULT_URL SLURM_CLUSTERS SLURM_CONF diff --git a/jupyterhub/slurm_login/etc/sudoers b/jupyterhub/slurm_login/etc/sudoers new file mode 100644 index 0000000..4bacfd2 --- /dev/null +++ b/jupyterhub/slurm_login/etc/sudoers @@ -0,0 +1,34 @@ +# Copyright 2023 Vrije Universiteit Brussel +# +# This file is part of notebook-platform, +# originally created by the HPC team of Vrij Universiteit Brussel (http://hpc.vub.be), +# with support of Vrije Universiteit Brussel (http://www.vub.be), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# the Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/vub-hpc/notebook-platform +# +# notebook-platform is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License v3 as published by +# the Free Software Foundation. +# +# notebook-platform is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# +### +# User jupyterhub can execute certain Slurm commands as VSC users +# Reset environment except for those variables sent by the hub + +# Command alias specification +Cmnd_Alias SLURM = /usr/bin/sbatch, /usr/bin/squeue, /usr/bin/scancel, /usr/bin/sinfo + +# Defaults specification +Defaults env_reset +Defaults:jupyterhub env_keep="JUPYTERHUB_API_TOKEN JPY_API_TOKEN JUPYTERHUB_CLIENT_ID JUPYTERHUB_HOST JUPYTERHUB_API_URL JUPYTERHUB_OAUTH_CALLBACK_URL JUPYTERHUB_OAUTH_SCOPES JUPYTERHUB_USER JUPYTERHUB_SERVER_NAME JUPYTERHUB_ACTIVITY_URL JUPYTERHUB_BASE_URL JUPYTERHUB_SERVICE_PREFIX JUPYTERHUB_SERVICE_URL JUPYTERHUB_ROOT_DIR JUPYTERHUB_DEFAULT_URL SLURM_CLUSTERS SLURM_CONF" + +# User privilege specification +jupyterhub ALL= (%vsc) NOPASSWD: SLURM From bad7fa51ccded565ed76c4d5b88060f39ca811d1 Mon Sep 17 00:00:00 2001 From: Alex Domingo Date: Mon, 20 Feb 2023 14:13:01 +0100 Subject: [PATCH 2/2] update documentation with details of rootless and userless implementation --- README.md | 17 ++-- jupyterhub/README.md | 195 ++++++++++++++++++------------------------- 2 files changed, 94 insertions(+), 118 deletions(-) diff --git a/README.md b/README.md index 1d5d50c..afbe244 100644 --- a/README.md +++ b/README.md @@ -3,13 +3,18 @@ The goal of our notebook platform is to provide a web-based interface to our tier-2 HPC cluster. 
This alternative interface to the standard shell interface is based on [computational notebooks](https://en.wikipedia.org/wiki/Notebook_interface).
-The notebook platform must be capable to handle user authentication, launch
-notebooks leveraging the computational resources of our HPC infrastructure and
-allow users to manage a library of notebooks.
+The notebook platform must be able to:
+* handle VSC user authentication
+* allow selection of computational resources
+* launch notebooks leveraging the computational resources of our HPC infrastructure
+* allow users to manage a library of notebooks
+* integrate with the software module system of our HPC clusters

## JupyterHub

-[JupyterHub](https://jupyter.org/hub) from the Jupyter Project fulfills all the
-requirements of this platform. The details of its integration in the HPC
-cluster of VUB are available in [notebook-platform/jupyterhub](jupyterhub).
+[JupyterHub](https://jupyter.org/hub) from the Jupyter Project fulfills all
+requirements of this platform. Moreover, the modular architecture of JupyterHub
+makes it easy to implement solutions for those requirements that are not
+covered natively. The details of its integration in the HPC cluster of VUB are
+available in [notebook-platform/jupyterhub](jupyterhub).
diff --git a/jupyterhub/README.md b/jupyterhub/README.md
index c5439c9..15be378 100644
--- a/jupyterhub/README.md
+++ b/jupyterhub/README.md
@@ -3,16 +3,42 @@
![JupyterHub integration in HPC cluster](jupyterhub-diagram.png "JupyterHub integration in HPC cluster")

The hub and its HTTP proxy are run by a non-root user in a rootless container.
-The container is managed by a service in [systemd](https://systemd.io/) with
-[podman](https://podman.io/).
+The container is managed on the host system by a service in
+[systemd](https://systemd.io/) with [podman](https://podman.io/).

-Notebooks are run remotely, in any available compute node in the HPC cluster.
+Notebooks are launched remotely, on the compute nodes of our HPC cluster.
The allocation of hardware resources for the notebook is done on-demand by
-[Slurm](https://slurm.schedmd.com/). JupyterHub can submit jobs to Slurm to
-launch new notebooks thanks to [batchspawner](https://github.com/jupyterhub/batchspawner).
+the resource manager [Slurm](https://slurm.schedmd.com/). Users can select the
+resources for their notebooks from the JupyterHub interface thanks to the
+[JupyterHub MOdular Slurm Spawner](https://github.com/silx-kit/jupyterhub_moss),
+which leverages [batchspawner](https://github.com/jupyterhub/batchspawner) to
+submit jobs to Slurm on the user's behalf that will launch the single-user server.
+
The main particularity of our setup is that such jobs are not submitted to
Slurm from the host running JupyterHub, but from the login nodes of the HPC
-cluster.
+cluster via an SSH connection. This approach has the advantage that the system
+running JupyterHub can be very minimal, avoiding the need for local users,
+special file-system mounts and the complexity of provisioning a Slurm
+installation capable of submitting jobs to the HPC cluster.
+
+## Rootless
+
+JupyterHub is run by a non-root user in a rootless container. Setting up a
+rootless container is well described in the [podman rootless
+tutorial](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md).
+
+We use a [system service](host/etc/systemd/system/jupyterhub.service) to
+execute `podman` as a non-root user `jupyterhub` (*aka* JupyterHub operator).
+This service relies on a [custom shell script](host/usr/local/bin/jupyterhub-init.sh) +to automatically initialize a new image of the rootless container or start an +existing one. + +The container [binds a few mounts with sensitive configuration +files](host/usr/local/bin/jupyterhub-init.sh#L59-L66) for JupyterHub, SSL +certificates for the web server and SSH keys to connect to the login nodes. +Provisioning these files in the container through bind-mounts allows to have +secret-free container images and seamlessly deploy updates to the configuration +of the hub. ## Network @@ -21,13 +47,13 @@ have a routable IP address, so they rely on the network interfaces of the host system. The hub must be able to talk to the notebooks being executed on the compute nodes in the internal network, as well as serve the HTTPS requests (through its proxy) from users on the external network. Therefore, ports 8000 -(HTTP proxy) and 8081 (REST API) in the -[container are forwarded to the host system](usr/local/bin/jupyterhub-init.sh#L54). +(HTTP proxy) and 8081 (REST API) in the [container are forwarded to the host +system](host/usr/local/bin/jupyterhub-init.sh#L53-L57). The firewall on the host systems blocks all connection through the external network interface and forwards port 8000 on the internal interface (HTTP proxy) -to port 443 on the external one. This setup allows accessing the web interface -of the hub/notebooks from both the internal and external networks. The REST API +to port 443 on the external one. This setup renders the web interface of the +hub/notebooks accessible from both the internal and external networks. The REST API of the hub is only available on port 8081 of the internal network. ## Authentication @@ -36,112 +62,57 @@ User authentication is handled through delegation via the [OAuth](https://en.wikipedia.org/wiki/OAuth) service of the [VSC](https://www.vscentrum.be/) accounts used by our users. -We made a custom -[VSCGenericOAuthenticator](etc/jupyterhub/jupyterhub_config.py#L73-L77) which -is heavily based on `LocalGenericOAuthenticator` from -[OAuthenticator](https://github.com/jupyterhub/oauthenticator/): - -* entirely relies on OAuthenticator to carry out a standard OAuth delegation - with the VSC account page, the [URLs of the VSC OAuth are defined in the - environment of the container](container/Dockerfile#L59-L61) and the [secrets - to connect to it are defined in JupyterHub's configuration - file](etc/jupyterhub/jupyterhub_config.py#L83-L88) -* automatically creates local users in the container for any VSC account logged - in to JupyterHub and ensures correct UID mapping to allow local VSC users to - [access their home directories](usr/local/bin/jupyterhub-init.sh#L80), - which is needed to securely connect to the login nodes in the HPC cluster - with their SSH keys +We use the [GenericOAuthenticator](https://github.com/jupyterhub/oauthenticator/) +from JupyterHub: -## Rootless +* carry out a standard OAuth delegation with the VSC account page -JupyterHub is run by a non-root user in a rootless container. Setting up a -rootless container is well described in the [podman rootless -tutorial](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md). - -We use a [system service](etc/systemd/system/jupyterhub.service) to execute -`podman` by a non-root user `jupyterhub` (*aka* JupyterHub operator). 
This -service relies on a [custom shell script](usr/local/bin/jupyterhub-init.sh) to -automatically initialize a new image of the rootless container or start an -existing one. - -### Extra permissions - -In the current setup, running JupyterHub fully non-root is not possible because -the hub needs superuser permissions for two specific tasks: - -* `VSCGenericOAuthenticator` creates local users in the container -* `SlurmSpawner` switches to VSC users (other non-root users) to launch their - notebooks through Slurm + * [URLs of the VSC OAuth](container/Dockerfile#L72-L76) are defined in the + environment of the container -These additional permissions are granted to the hub user discretely by means of -`sudo`. The definitions of each extra permission is defined in the -[sudoers](container/sudoers.conf) file of the container. + * [OAuth secrets](container/.config/jupyterhub_config.py#L40-L45) are + defined in JupyterHub's configuration file -### Container namespace - -Users logging in JupyterHub have to access their home directories to be able to -connect to the login nodes of the HPC cluster with their SSH keys. Since home -directories are bound to the mounts in the host system, it is critical to -properly define the namespace used by the rootless container to cover the real -UIDs of the users in the host system. - -The UID/GIDs of VSC users are all in the 250000-2599999 range. We can -easily create a [mapping for the container](etc/subuid) with a straightforward -relationship between the UIDs inside and outside the container: - -``` -$ podman unshare cat /proc/self/uid_map - 0 4009 1 - 1 2500001 65536 -``` - -Therefore, the non-root user executing the rootless container will be mapped to -the root user of the container, as usual. While, for instance, user with UID 1 -in the container will be able to access the files of UID 250001 outside. -The custom method [`vsc_user_uid_home`](etc/jupyterhub/jupyterhub_config.py#L43) -ensures that VSC users created inside the container have the correct UID with -regards to this mapping. - -The namespace used by the container must be available in the host system (*i.e* -not assigned to any user or group in the system), which means that the VSC -users must not exist in the host system of the container. This requirement does -not hinder mounting the home directories of those VSC users in the system -though, as any existing files owned by those UID/GIDs of the VSC users will be -just non-assigned to any known user/group. +* local users beyond the non-root user running JupyterHub are **not needed** ## Slurm -Integration with Slurm is leveraged by `SlurmSpawner` of -[batchspawner](https://github.com/jupyterhub/batchspawner). - -We modified the submission command to execute `sbatch` in the login nodes of -the HPC cluster through SSH. The login nodes already run Slurm and are the sole -systems handling job submission in our cluster. Delegating job submission to -them avoids having to install and configure Slurm in the container running -JupyterHub. 
-The user's environment in the hub is passed through the SSH connection by
-selectively selecting the needed environment variables to launch the user's
-notebook:
-
-```
-sudo -E -u vscXXXXX ssh -o 'StrictHostKeyChecking no' login.host.domain \
-    env JUPYTERHUB_API_TOKEN="${JUPYTERHUB_API_TOKEN@Q}" \
-    JPY_API_TOKEN="${JPY_API_TOKEN@Q}" \
-    JUPYTERHUB_CLIENT_ID="${JUPYTERHUB_CLIENT_ID@Q}" \
-    JUPYTERHUB_HOST="${JUPYTERHUB_HOST@Q}" \
-    JUPYTERHUB_API_URL="${JUPYTERHUB_API_URL@Q}" \
-    JUPYTERHUB_OAUTH_CALLBACK_URL="${JUPYTERHUB_OAUTH_CALLBACK_URL@Q}" \
-    JUPYTERHUB_OAUTH_SCOPES="${JUPYTERHUB_OAUTH_SCOPES@Q}" \
-    JUPYTERHUB_USER="${JUPYTERHUB_USER@Q}" \
-    JUPYTERHUB_SERVER_NAME="${JUPYTERHUB_SERVER_NAME@Q}" \
-    JUPYTERHUB_ACTIVITY_URL="${JUPYTERHUB_ACTIVITY_URL@Q}" \
-    JUPYTERHUB_BASE_URL="${JUPYTERHUB_BASE_URL@Q}" \
-    JUPYTERHUB_SERVICE_PREFIX="${JUPYTERHUB_SERVICE_PREFIX@Q}" \
-    JUPYTERHUB_SERVICE_URL="${JUPYTERHUB_SERVICE_URL@Q}" \
-    sbatch --parsable
-```
-
-Note: the expansion operator `${var@Q}` is available in bash 4.4+ and returns a
-quoted string with escaped special characters
-
+Integration with Slurm is leveraged through a custom Spawner called
+[VSCSlurmSpawner](container/.config/jupyterhub_config.py#L60) based on
+[MOSlurmSpawner](https://github.com/silx-kit/jupyterhub_moss).
+`VSCSlurmSpawner` allows JupyterHub to generate the environment needed to spawn
+the user's single-user server without requiring any local users. All user
+settings are taken from `vsc-config`.
+
+We modified the [submission command](container/.config/jupyterhub_config.py#L295)
+to execute `sbatch` on the login nodes of the HPC cluster through SSH.
+The login nodes already run Slurm and are the sole systems handling job
+submission in our cluster. Delegating job submission to them avoids having to
+install and configure Slurm in the container running JupyterHub. The hub
+environment is passed over SSH with strict control over the variables that
+are [sent](container/.ssh/config) and [accepted](slurm_login/etc/ssh/sshd_config)
+on both ends.
+
+The SSH connection is established by the non-root user running JupyterHub (the
+hub container does not have any other local users). This jupyterhub user has
+special `sudo` permissions on the login nodes to submit jobs to Slurm as other
+users. The specific group of users and the list of commands allowed to the
+jupyterhub user are defined in the [sudoers file](slurm_login/etc/sudoers).
+
+Single-user server spawn process:
+
+1. the user selects the computational resources for the notebook in the
+   [web interface of the hub](https://github.com/silx-kit/jupyterhub_moss)
+
+2. `VSCSlurmSpawner` generates the user's environment without any local users
+   in the system of the hub
+
+3. the jupyterhub user connects to a login node with SSH, and the hub
+   environment is passed over the connection
+
+4. the jupyterhub user submits a new job to the Slurm cluster as the target
+   user, keeping the hub environment
+
+5. the single-user server job fully [resets the environment and
+   re-generates](container/.config/jupyterhub_config.py#L264-L285) the specific
+   environment variables needed by the single-user server
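+
+For illustration, the effective submission command assembled by the hub for
+steps 3 and 4 looks roughly like the sketch below. This is a simplified,
+hypothetical example: the login node hostname and `vscXXXXX` are placeholders,
+and the real command is generated by `VSCSlurmSpawner` from the configuration
+files referenced above.
+
+```
+# hub variables listed in SendEnv/AcceptEnv travel over the SSH connection;
+# the matching env_keep rule in the sudoers file preserves them across sudo,
+# and the generated batch script is piped to sbatch on standard input
+ssh login.internal.domain \
+    sudo -u vscXXXXX sbatch --parsable
+```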