Commit 39c09f2

Add Ansible playbook to re-create the VM used for the training.
riccardomurri committed Aug 30, 2017
1 parent d1bc57d commit 39c09f2
Showing 22 changed files with 800 additions and 0 deletions.
@@ -0,0 +1,85 @@
#
# This is an example configuration file for GC3Pie,
# meant to be used in the "GC3Pie for programmers" course.
#
# At the end of the course, it will contain real working definitions
# of the UZH computing resources so it can also be used as a basis
# for your own configuration file for production use.
#

# run tasks on the computer where GC3Pie commands are typed
[resource/localhost]
enabled = yes
type = shellcmd
frontend = localhost
transport = local
max_cores_per_job = 2
max_memory_per_core = 2 GiB
max_walltime = 12 hours
max_cores = 2
architecture = x86_64
auth = none
override = no


# run tasks on the Hydra large-memory cluster
[resource/hydra]
enabled = no
type = slurm
frontend = login.s3it.uzh.ch
transport = ssh
auth = ssh_user_account
max_walltime = 1 day
max_cores = 96
max_cores_per_job = 64
max_memory_per_core = 1 TiB
architecture = x86_64
prologue_content =
  module load cluster/largemem

[auth/ssh_user_account]
type=ssh
# TO-DO: replace `uzh-shortname` with your actual UZH shortname
username=uzh-shortname


# run jobs on the UZH "ScienceCloud"
[resource/sciencecloud]
enabled=no
type=openstack+shellcmd
auth=openstack

vm_pool_max_size = 32
security_group_name=default
security_group_rules=
  tcp:22:22:0.0.0.0/0,
  icmp:-1:-1:0.0.0.0/0
network_ids=
  c86b320c-9542-4032-a951-c8a068894cc2

# definition of a single execution VM:
# - flavor (i.e., size) of the VM
instance_type=1cpu-4ram-hpc
# - image_id of the `Ubuntu 14.04 (2017-07-18)` image
image_id=2a7a2c1a-76a7-4174-9116-4aa8799347ac

max_cores_per_job = 8
max_memory_per_core = 4 GiB
max_walltime = 90 days
max_cores = 32
architecture = x86_64

# how to connect
vm_auth=ssh_user_ubuntu
keypair_name=CHANGEMEPLEASE
public_key=~/.ssh/id_rsa.pub

[auth/ssh_user_ubuntu]
# default user on Ubuntu VM images
type=ssh
username=ubuntu

[auth/openstack]
# only need to set the `type` here; any other value will be taken from
# the `OS_*` environment variables
type = openstack
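
A minimal way to exercise this configuration, assuming it is installed as `~/.gc3/gc3pie.conf` and the GC3Pie virtualenv created by the playbook below is available; the OpenStack RC file name is a placeholder:

    # activate GC3Pie and list the resources it can see
    . ~/gc3pie/bin/activate
    gservers                        # `localhost` should be listed (the only enabled resource)
    # provide the OS_* credentials read by [auth/openstack], then flip
    # `enabled=no` to `enabled=yes` under [resource/sciencecloud]
    . ~/my-sciencecloud-openrc.sh   # placeholder name for the RC file from the cloud dashboard
    gservers                        # `sciencecloud` should now appear as well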
@@ -0,0 +1,3 @@
---

gc3pie_dir: '{{ansible_user_dir}}/gc3pie'
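
This variable makes the installation directory default to `$HOME/gc3pie`; it can be overridden on the command line. A hedged example (the playbook name `playbook.yml` and the target host are placeholders, not part of this commit):

    # install GC3Pie under /opt/gc3pie instead of the default $HOME/gc3pie
    ansible-playbook -i 'vm.example.org,' -u ubuntu \
        -e gc3pie_dir=/opt/gc3pie playbook.yml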
docs/programmers/tutorials/workflows/roles/gc3pie/tasks/main.yml (36 additions, 0 deletions)
@@ -0,0 +1,36 @@
#
# Install GC3Pie in `{{gc3pie_dir}}` (default: `$HOME/gc3pie`)
#
---

- name: Require that OS is Debian or Ubuntu
  fail:
    msg: This playbook can only run on Debian or Ubuntu
  when: 'ansible_os_family != "Debian"'

- name: Ensure dependent packages are installed
  package:
    name: '{{item}}'
    state: installed
  with_items:
    - g++
    - gcc
    - git
    - libffi-dev
    - libssl-dev
    - python
    - python-dev
    - python-virtualenv
  become: yes

- name: Download GC3Pie installer
  get_url:
    url: https://raw.githubusercontent.com/uzh/gc3pie/master/install.py
    dest: /var/tmp/install.py
    mode: 0755

- name: Run GC3Pie installer
  command:
    python /var/tmp/install.py --yes --develop --overwrite --feature openstack,ec2 -d {{gc3pie_dir}}
  args:
    creates: '{{gc3pie_dir}}/bin/gsession'
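
The `creates:` argument above is what makes re-running the role safe: Ansible skips the installer task once the target file exists. A rough shell equivalent of that guard (a sketch, assuming the default `gc3pie_dir`):

    # re-running is a no-op once the virtualenv already contains `gsession`
    if [ ! -e "$HOME/gc3pie/bin/gsession" ]; then
        python /var/tmp/install.py --yes --develop --overwrite \
            --feature openstack,ec2 -d "$HOME/gc3pie"
    fi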
@@ -0,0 +1,20 @@
This directory contains files that are deployed on the GC3Pie
tutorial VM.

All files are copyright (c) 2010-2017 University of Zurich and
licensed under the same conditions as the rest of GC3Pie, with the
following exceptions:

* [`bfly.jpg`](bfly.jpg) -- Originally found on [pixabay.com][1] (which see for copyright info), [CC0][cc0]-licensed
* [`lena.jpg`](lena.jpg) -- The *Lenna* "standard test image"; see the [Wikipedia article][4] for history and the [Wikimedia page][5] for download and copyright info.
* [`coffee.jpg`](coffee.jpg) -- Originally found on [pixabay.com][2] (which see for copyright info), [CC0][cc0]-licensed
* [`zebra.jpg`](zebra.jpg) -- Originally found on [pixabay.com][3] (which see for copyright info), [CC0][cc0]-licensed
[cc0]: https://creativecommons.org/publicdomain/zero/1.0/deed.en
[1]: https://pixabay.com/it/farfalla-rondine-farfalla-a-coda-1228639/
[2]: https://pixabay.com/it/caff%C3%A8-cafe-coffee-shop-americano-843278/
[3]: https://pixabay.com/it/zebra-animale-selvatico-safari-175085/
[4]: https://en.wikipedia.org/wiki/Lenna
[5]: https://en.wikipedia.org/wiki/Lenna#/media/File:Lenna.png
@@ -0,0 +1,7 @@
# match the style used in GC3Pie slides
export PS1='> '

# automatically activate the virtualenv
if [ -r "$HOME/gc3pie/bin/activate" ]; then
. "$HOME/gc3pie/bin/activate"
fi
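
With this profile in place, logging into the VM drops the user straight into the GC3Pie virtualenv. An illustrative session (the host name is a placeholder):

    $ ssh ubuntu@vm.example.org        # local prompt
    > which gsession                   # remote prompt is the bare `> ` set above
    /home/ubuntu/gc3pie/bin/gsession   # PATH now points into the activated virtualenv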
@@ -0,0 +1,85 @@
#
# This is an example configuration file for GC3Pie,
# meant to be used in the "GC3Pie for programmers" course.
#
# At the end of the course, it will contain real working definitions
# of the UZH computing resources so it can also be used as a basis
# for your own configuration file for production use.
#

# run tasks on the computer where GC3Pie commands are typed
[resource/localhost]
enabled = yes
type = shellcmd
frontend = localhost
transport = local
max_cores_per_job = 2
max_memory_per_core = 2 GiB
max_walltime = 12 hours
max_cores = 2
architecture = x86_64
auth = none
override = no


# run tasks on the Hydra large-memory cluster
[resource/hydra]
enabled = no
type = slurm
frontend = login.s3it.uzh.ch
transport = ssh
auth = ssh_user_account
max_walltime = 1 day
max_cores = 96
max_cores_per_job = 64
max_memory_per_core = 1 TiB
architecture = x86_64
prologue_content =
  module load cluster/largemem

[auth/ssh_user_account]
type=ssh
# TO-DO: replace `uzh-shortname` with your actual UZH shortname
username=uzh-shortname


# run jobs on the UZH "ScienceCloud"
[resource/sciencecloud]
enabled=no
type=openstack+shellcmd
auth=openstack

vm_pool_max_size = 32
security_group_name=default
security_group_rules=
  tcp:22:22:0.0.0.0/0,
  icmp:-1:-1:0.0.0.0/0
network_ids=
  c86b320c-9542-4032-a951-c8a068894cc2

# definition of a single execution VM:
# - flavor (i.e., size) of the VM
instance_type=1cpu-4ram-hpc
# - image_id of the `Ubuntu 14.04 (2017-07-18)` image
image_id=2a7a2c1a-76a7-4174-9116-4aa8799347ac

max_cores_per_job = 8
max_memory_per_core = 4 GiB
max_walltime = 90 days
max_cores = 32
architecture = x86_64

# how to connect
vm_auth=ssh_user_ubuntu
keypair_name=CHANGEMEPLEASE
public_key=~/.ssh/id_rsa.pub

[auth/ssh_user_ubuntu]
# default user on Ubuntu VM images
type=ssh
username=ubuntu

[auth/openstack]
# only need to set the `type` here; any other value will be taken from
# the `OS_*` environment variables
type = openstack
@@ -0,0 +1,76 @@
# ~/.gc3/logging.conf
#
# Configuration file for gc3utils logging
#
# See http://docs.python.org/release/2.4/lib/logging-config-fileformat.html
# for the syntax of the configuration directives.
# In addition to the standard syntax, the following strings will be given
# special treatment:
#
# %(HOMEDIR)s is substituted with the full path to the home directory
# of the running user
#
# %(RCDIR)s is substituted with the full path to the directory where
# the gc3utils configuration file resides
#

[loggers]
keys=root,gc3

[logger_root]
# The "root" logger gets messages from all running code, not just gc3utils
# (e.g., from Paramiko); dump all this into a logfile at the most verbose level
level=NOTSET
handlers=logfile

[logger_gc3]
# The "gc3" logger is the root logger for all GC3 stuff. Each program
# library requests a logger in this hierarchy, named after the
# program/library name; for instance, "grosetta" uses the
# "gc3.grosetta" logger, GC3Libs use the "gc3.gc3libs" logger, etc.
# The GC3Utils are a slight exception to this rule, in that they use
# "gc3.gc3utils" regardless of the actual invocation name.
qualname=gc3
# If you change this, it will take precedence over command-line options.
level=NOTSET
handlers=stderr
# This ensures that anything is also logged by the root/logfile logger above.
propagate=1

# If you want to override log settings as used by the GC3Libs,
# uncomment this section and alter it to your wishes.
# [logger_gc3_gc3libs]
# qualname=gc3.gc3libs
# level=DEBUG
# handlers=stderr
# # This ensures that anything is also logged by the root/logfile logger above.
# propagate=1

[handlers]
keys=logfile,stderr

[handler_logfile]
# verbose output to a logfile located at ~/.gc3/debug.log;
# rotate the logfile every Monday, keeping 5 old copies
level=DEBUG
class=handlers.TimedRotatingFileHandler
args=('%(RCDIR)s/debug.log','w0',1,5)
formatter=logfile

[handler_stderr]
# console output level should not be set here, as it
# will limit the effectiveness of option "-v"
level=NOTSET
class=StreamHandler
args=(sys.stderr,)
formatter=console

[formatters]
keys=logfile,console

[formatter_logfile]
format=[%(asctime)s] %(name)-12s %(levelname)-8s: %(message)s
datefmt=%Y-%m-%d %H:%M:%S

[formatter_console]
format=%(name)s: %(levelname)s: %(message)s
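
For reference, a single INFO-level message from `gc3.gc3utils` would be rendered by the two formatters above roughly as follows (the message text is illustrative):

    # console (formatter_console):
    gc3.gc3utils: INFO: Creating session directory '/home/ubuntu/session' ...
    # logfile (formatter_logfile):
    [2017-08-30 14:05:12] gc3.gc3utils INFO    : Creating session directory '/home/ubuntu/session' ...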
@@ -0,0 +1,4 @@
Welcome to the GC3Pie training VM.

This VM is preconfigured with the latest GC3Pie and example scripts.
See http://tinyurl.com/gc3pie-workflows-tutorial for teaching material.
