################################################################################
#
# Settings for using the "pilot" resource locator.
#
# A set of VMM resources may be managed entirely by the workspace service or,
# alternatively, integrated with a site's resource manager (such as PBS)
# using the pilot.
#
# This enables a dual-use grid cluster: regular grid jobs run on a VMM node
# with no guest VMs unless the node is currently allocated to the workspace
# service. The site resource manager maintains full control over the cluster
# and does not need to be modified.
#
# NOTE: To enable the pilot you need to run:
# "cp other/resource-locator-pilot.xml other/resource-locator-ACTIVE.xml"
#
# NOTE: For notifications to be meaningful, reasonable clock sync between all
# nodes is important (use NTP, for example).
#
# NOTE: Logs are set to be delivered to the workspace service's persistence
# directory (something like "var/nimbus/pilot-logs")
#
################################################################################

################################################################################
#
# Configuration for notifications secured with HTTP Digest access
# authentication.
#
################################################################################
# host:port for HTTP-based notifications. This must be an address or hostname
# that the VMM nodes can resolve.
contact.socket=1.2.3.4:41999
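#
# For example, a hostname works as well, as long as the VMM nodes can
# resolve it (the hostname below is just a placeholder):
# contact.socket=svc.example.org:41999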
################################################################################
#
# General settings
#
################################################################################
# The path to the pilot program on the VMM nodes.
#
# If you would like to use a configuration file rather than the embedded
# configuration, append -p followed by the path to your configuration file to
# this setting.
# For example:
#
# pilot.path=/opt/workspacepilot.py -p /etc/workspacepilot.conf
pilot.path=/opt/workspacepilot.py
# The maximum available memory for a guest on the VMM nodes:
memory.maxMB=2048
################################################################################
#
# LRM specific settings (Torque)
#
# This version only supports starting the pilot with Torque (tested with
# versions 2.1.8 and 2.2.1). Contact the workspace-user mailing list about
# other requirements; adapting to other LRMs should be straightforward if
# they support typical job cancellation semantics.
#
################################################################################
# Path to the job submission program (if the program is on the container
# account's PATH, this does not need to be an absolute path):
pbs.submit.path=qsub
# Path to the job deletion program (if the program is on the container
# account's PATH, this does not need to be an absolute path):
pbs.delete.path=qdel
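#
# For example, if the Torque tools are not on the container account's PATH,
# absolute paths can be used instead (the paths below are placeholders for
# your installation):
# pbs.submit.path=/usr/local/torque/bin/qsub
# pbs.delete.path=/usr/local/torque/bin/qdel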
# Processors per node. If this is set to 0, the pilot job will request as
# many processors as are requested for the VM. For example, if a user requests
# a 2-core VM, ppn will be set to 2.
#
# On some installations, you may wish to hardcode this to a specific value
# to ensure that each pilot job reserves a whole node for a VM. In this case,
# choose a non-zero value.
pbs.ppn=0
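#
# For example, on a cluster of 8-core nodes, the following would make every
# pilot job reserve a whole node regardless of the VM request (the value
# below is a placeholder for your hardware):
# pbs.ppn=8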
# If the pilot job should be submitted to a special queue/server, configure
# that here. For Torque, this value is sent as the "-q destination" parameter,
# and can take one of three forms: "queue", "@server", or "queue@server".
# If this configuration is empty or missing, the pilot job is run with defaults
# for the submitting user (the container).
pbs.destination=
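#
# For example, any of the three forms described above (the queue and server
# names below are placeholders for your site's own):
# pbs.destination=batch
# pbs.destination=@pbs.example.org
# pbs.destination=batch@pbs.example.org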
# The grace period (seconds) between the LRM's SIGTERM and SIGKILL signals.
# This can be zero, but that gives the VWS no time to cleanly shut down the
# running workspaces in the slot when the slot job is preempted.
pbs.grace=8
# Optional: extra node properties needed in the submission (for example, you
# might label nodes in the PBS pool with a string such as 'xen' to
# distinguish them).
pbs.extra.properties=
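#
# For example, to restrict pilot jobs to nodes labeled with the 'xen'
# property mentioned above:
# pbs.extra.properties=xen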
# Optional: if configured, this is prepended to the pilot executable
# invocation when more than one node is needed. Torque uses pbsdsh for this.
pbs.multijob.prefix=pbsdsh -u