
Commit

Merging with dev
anuprulez committed Jun 12, 2017
2 parents 6d9a911 + f81c5bb commit 050ff3b
Showing 26 changed files with 678 additions and 122 deletions.
5 changes: 5 additions & 0 deletions client/galaxy/scripts/apps/analysis.js
@@ -87,6 +87,7 @@ window.app = function app( options, bootstrapped ){
'(/)workflow(/)' : 'show_workflows',
'(/)workflow/run(/)' : 'show_run',
'(/)pages(/)(:action_id)' : 'show_pages',
'(/)datasets(/)(:action_id)' : 'show_datasets',
'(/)workflow/configure_menu(/)' : 'show_configure_menu',
'(/)workflow/import_workflow' : 'show_import_workflow',
'(/)custom_builds' : 'show_custom_builds'
@@ -123,6 +124,10 @@ window.app = function app( options, bootstrapped ){
this.page.display( new UserPreferences.Forms( { form_id: form_id, user_id: Galaxy.params.id } ) );
},

show_datasets : function() {
this.page.display( new GridView( { url_base: Galaxy.root + 'dataset/list', dict_format: true } ) );
},

show_pages : function( action_id ) {
if ( action_id == 'list' ) {
this.page.display( new PageList.View() );
4 changes: 2 additions & 2 deletions client/galaxy/scripts/layout/menu.js
@@ -271,8 +271,8 @@ var Collection = Backbone.Collection.extend({
target : 'galaxy_main'
},{
title : _l('Saved Datasets'),
url : 'dataset/list',
target : 'galaxy_main'
url : 'datasets/list',
target : '_top'
},{
title : _l('Saved Pages'),
url : 'pages/list',
47 changes: 47 additions & 0 deletions config/job_conf.xml.sample_advanced
@@ -440,6 +440,53 @@
the deployer. -->
<!-- <param id="require_container">true</param> -->
</destination>
<destination id="singularity_local" runner="local">
<param id="singularity_enabled">true</param>
<!-- See the documentation for docker_volumes above; singularity_volumes works
almost the same way. The only difference is that $defaults will expand to
rw directories that in Docker would be mounted ro whenever any of their
subdirectories need to be rw. As an example, Docker mounts the parent of the
working directory (known as the job directory) as ro and the working directory
itself as rw. This doesn't work in Singularity: if a parent directory is
mounted ro, none of its children can be rw. So the job directory is mounted
rw for Singularity.
-->
<!--
<param id="singularity_volumes">$defaults,/mnt/galaxyData/libraries:ro,/mnt/galaxyData/indices:ro</param>
-->
<!-- Singularity can be configured to run using sudo - this probably should not
be set and may be removed in the future.
-->
<!-- <param id="singularity_sudo">false</param> -->
<!-- The following option can be used to tweak the sudo command used by
default. -->
<!-- <param id="singularity_sudo_cmd">/usr/bin/sudo -extra_param</param> -->
<!-- Pass extra arguments to the singularity exec command that are not covered
by the above options. -->
<!-- <param id="singularity_run_extra_arguments"></param> -->
<!-- The following option can be used to tweak the singularity command itself. -->
<!-- <param id="singularity_cmd">/usr/local/custom_singularity/singularity</param> -->

<!-- If the deployer wants to use Singularity for isolation but does not
trust a tool's specified container, a destination-wide override can be
set. This will cause all jobs on this destination to use that
Singularity image. -->
<!-- <param id="singularity_container_id_override">/path/to/singularity/image</param> -->

<!-- Likewise, if the deployer wants to use Singularity for isolation and
does trust a tool's specified container, but also wants tools that are not
configured to run in a container to do so, the following option provides
a fallback image. -->
<!-- <param id="singularity_default_container_id">/path/to/singularity/image</param> -->

<!-- If the destination should be secured to only allow containerized jobs,
the following parameter may be set for the job destination. Not all, or
even most, tools available in Galaxy core or in the Tool Shed support
containers yet, so this option may require a lot of extra work for the
deployer. -->
<!-- <param id="require_container">true</param> -->
</destination>
<destination id="pbs" runner="pbs" tags="mycluster"/>
<destination id="pbs_longjobs" runner="pbs" tags="mycluster,longjobs">
<!-- Define parameters that are native to the job runner plugin. -->
4 changes: 3 additions & 1 deletion lib/galaxy/tools/deps/conda_util.py
@@ -488,7 +488,7 @@ def cleanup_failed_install(conda_target, conda_context=None):
cleanup_failed_install_of_environment(conda_target.install_environment, conda_context=conda_context)


def best_search_result(conda_target, conda_context=None, channels_override=None):
def best_search_result(conda_target, conda_context=None, channels_override=None, offline=False):
"""Find best "conda search" result for specified target.
Return ``None`` if no results match.
@@ -498,6 +498,8 @@ def best_search_result(conda_target, conda_context=None, channels_override=None)
conda_context.ensure_channels_configured()

search_cmd = [conda_context.conda_exec, "search", "--full-name", "--json"]
if offline:
search_cmd.append("--offline")
if channels_override:
search_cmd.append("--override-channels")
for channel in channels_override:
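The new ``offline`` flag simply threads through to the ``conda search`` invocation. Below is a minimal standalone sketch (not the Galaxy code itself) of its effect on the assembled command line; the conda binary path and package name are hypothetical.

# Sketch: how the offline flag changes the "conda search" command that
# best_search_result() builds. Paths and package names are hypothetical.
conda_exec = "/path/to/conda"            # hypothetical conda binary
search_cmd = [conda_exec, "search", "--full-name", "--json"]
offline = True
if offline:
    # --offline keeps conda from contacting remote channels and makes it
    # rely on locally cached index data instead.
    search_cmd.append("--offline")
search_cmd.append("samtools")            # hypothetical package name
print(" ".join(search_cmd))
# -> /path/to/conda search --full-name --json --offline samtools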
113 changes: 90 additions & 23 deletions lib/galaxy/tools/deps/container_resolvers/mulled.py
@@ -18,23 +18,26 @@
)
from ..mulled.mulled_build_tool import requirements_to_mulled_targets
from ..mulled.util import (
image_name,
mulled_tags_for,
split_tag,
v1_image_name,
v2_image_name,
)
from ..requirements import ContainerDescription

log = logging.getLogger(__name__)


CachedMulledImageSingleTarget = collections.namedtuple("CachedMulledImageSingleTarget", ["package_name", "version", "build", "image_identifier"])
CachedMulledImageMultiTarget = collections.namedtuple("CachedMulledImageMultiTarget", ["hash", "image_identifier"])
CachedV1MulledImageMultiTarget = collections.namedtuple("CachedV1MulledImageMultiTarget", ["hash", "build", "image_identifier"])
CachedV2MulledImageMultiTarget = collections.namedtuple("CachedV2MulledImageMultiTarget", ["package_hash", "version_hash", "build", "image_identifier"])

CachedMulledImageSingleTarget.multi_target = False
CachedMulledImageMultiTarget.multi_target = True
CachedV1MulledImageMultiTarget.multi_target = "v1"
CachedV2MulledImageMultiTarget.multi_target = "v2"


def list_cached_mulled_images(namespace=None):
def list_cached_mulled_images(namespace=None, hash_func="v2"):
command = build_docker_images_command(truncate=True, sudo=False)
command = "%s | tail -n +2 | tr -s ' ' | cut -d' ' -f1,2" % command
images_and_versions = check_output(command)
@@ -44,35 +47,58 @@ def output_line_to_image(line):
image_name, version = line.split(" ", 1)
identifier = "%s:%s" % (image_name, version)
url, namespace, package_description = image_name.split("/")
if not version or version == "latest":
version = None

image = None
if package_description.startswith("mulled-v1-"):
if hash_func == "v2":
return None

hash = package_description
image = CachedMulledImageMultiTarget(hash, identifier)
else:
build = None
if not version or version == "latest":
version = None
if version and version.isdigit():
build = version
image = CachedV1MulledImageMultiTarget(hash, build, identifier)
elif package_description.startswith("mulled-v2-"):
if hash_func == "v1":
return None

version_hash = None
build = None

if version and "-" in version:
version_hash, build = version.rsplit("-", 1)
elif version.isdigit():
version_hash, build = None, version
elif version:
log.debug("Unparsable mulled image tag encountered [%s]" % version)

image = CachedV2MulledImageMultiTarget(package_description, version_hash, build, identifier)
else:
build = None
if version and "--" in version:
version, build = split_tag(version)

image = CachedMulledImageSingleTarget(image_name, version, build, identifier)

return image

return [output_line_to_image(_) for _ in filter(name_filter, images_and_versions.splitlines())]
# TODO: Sort on build ...
raw_images = [output_line_to_image(_) for _ in filter(name_filter, images_and_versions.splitlines())]
return [i for i in raw_images if i is not None]


def get_filter(namespace):
prefix = "quay.io/" if namespace is None else "quay.io/%s" % namespace
return lambda name: name.startswith(prefix) and name.count("/") == 2


def cached_container_description(targets, namespace):
def cached_container_description(targets, namespace, hash_func="v2"):
if len(targets) == 0:
return None

cached_images = list_cached_mulled_images(namespace)
cached_images = list_cached_mulled_images(namespace, hash_func=hash_func)
image = None
if len(targets) == 1:
target = targets[0]
@@ -84,10 +110,32 @@ def cached_container_description(targets, namespace):
if not target.version or target.version == cached_image.version:
image = cached_image
break
else:
name = image_name(targets)
elif hash_func == "v2":
name = v2_image_name(targets)
if ":" in name:
package_hash, version_hash = name.split(":", 2)
else:
package_hash, version_hash = name, None

for cached_image in cached_images:
if cached_image.multi_target != "v2":
continue

if version_hash is None:
# Just match on package hash...
if package_hash == cached_image.package_hash:
image = cached_image
break
else:
# Match on package and version hash...
if package_hash == cached_image.package_hash and version_hash == cached_image.version_hash:
image = cached_image
break

elif hash_func == "v1":
name = v1_image_name(targets)
for cached_image in cached_images:
if not cached_image.multi_target:
if cached_image.multi_target != "v1":
continue

if name == cached_image.hash:
@@ -109,16 +157,17 @@ class CachedMulledContainerResolver(ContainerResolver):

resolver_type = "cached_mulled"

def __init__(self, app_info=None, namespace=None):
def __init__(self, app_info=None, namespace=None, hash_func="v2"):
super(CachedMulledContainerResolver, self).__init__(app_info)
self.namespace = namespace
self.hash_func = hash_func

def resolve(self, enabled_container_types, tool_info):
if tool_info.requires_galaxy_python_environment:
return None

targets = mulled_targets(tool_info)
return cached_container_description(targets, self.namespace)
return cached_container_description(targets, self.namespace, hash_func=self.hash_func)

def __str__(self):
return "CachedMulledContainerResolver[namespace=%s]" % self.namespace
@@ -130,9 +179,10 @@ class MulledContainerResolver(ContainerResolver):

resolver_type = "mulled"

def __init__(self, app_info=None, namespace="biocontainers"):
def __init__(self, app_info=None, namespace="biocontainers", hash_func="v2"):
super(MulledContainerResolver, self).__init__(app_info)
self.namespace = namespace
self.hash_func = hash_func

def resolve(self, enabled_container_types, tool_info):
if tool_info.requires_galaxy_python_environment:
@@ -162,10 +212,25 @@ def resolve(self, enabled_container_types, tool_info):
version, build = split_tag(tags[0])
name = "%s:%s--%s" % (target.package_name, version, build)
else:
base_image_name = image_name(targets)
tags = mulled_tags_for(self.namespace, base_image_name)
if tags:
name = "%s:%s" % (base_image_name, tags[0])
def tags_if_available(image_name):
if ":" in image_name:
repo_name, tag_prefix = image_name.split(":", 2)
else:
repo_name = image_name
tag_prefix = None
tags = mulled_tags_for(self.namespace, repo_name, tag_prefix=tag_prefix)
return tags

if self.hash_func == "v2":
base_image_name = v2_image_name(targets)
tags = tags_if_available(base_image_name)
if tags:
name = "%s:%s" % (base_image_name, tags[0])
elif self.hash_func == "v1":
base_image_name = v1_image_name(targets)
tags = tags_if_available(base_image_name)
if tags:
name = "%s:%s" % (base_image_name, tags[0])

if name:
return ContainerDescription(
@@ -183,12 +248,13 @@ class BuildMulledContainerResolver(ContainerResolver):

resolver_type = "build_mulled"

def __init__(self, app_info=None, namespace="local", **kwds):
def __init__(self, app_info=None, namespace="local", hash_func="v2", **kwds):
super(BuildMulledContainerResolver, self).__init__(app_info)
self._involucro_context_kwds = {
'involucro_bin': self._get_config_option("involucro_path", None)
}
self.namespace = namespace
self.hash_func = hash_func
self._mulled_kwds = {
'namespace': namespace,
'channels': self._get_config_option("channels", DEFAULT_CHANNELS, prefix="mulled"),
@@ -206,9 +272,10 @@ def resolve(self, enabled_container_types, tool_info):
mull_targets(
targets,
involucro_context=self._get_involucro_context(),
hash_func=self.hash_func,
**self._mulled_kwds
)
return cached_container_description(targets, self.namespace)
return cached_container_description(targets, self.namespace, hash_func=self.hash_func)

def _get_involucro_context(self):
involucro_context = InvolucroContext(**self._involucro_context_kwds)
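The resolvers above now distinguish two mulled naming schemes when scanning the local Docker image cache: ``mulled-v1-<hash>`` repositories tagged with an optional numeric build, and ``mulled-v2-<hash>`` repositories tagged with ``<version_hash>-<build>``, while single-requirement images keep conda-style ``<version>--<build>`` tags. A rough, self-contained sketch of that classification follows; the image identifiers are hypothetical, and the real code additionally normalizes empty/"latest" tags and filters on the quay.io namespace.

# Illustrative re-implementation of the tag classification performed by
# output_line_to_image() above; not the Galaxy code itself.
def classify(identifier):
    repository, tag = identifier.rsplit(":", 1)
    package_description = repository.split("/")[-1]
    if package_description.startswith("mulled-v1-"):
        # v1 multi-target: repository name is the hash, tag is an optional build.
        build = tag if tag.isdigit() else None
        return ("v1", package_description, None, build)
    if package_description.startswith("mulled-v2-"):
        # v2 multi-target: tag is "<version_hash>-<build>" (or just a build number).
        if "-" in tag:
            version_hash, build = tag.rsplit("-", 1)
        elif tag.isdigit():
            version_hash, build = None, tag
        else:
            version_hash, build = None, None
        return ("v2", package_description, version_hash, build)
    # Single-target images keep conda-style "<version>--<build>" tags.
    if "--" in tag:
        version, build = tag.split("--", 1)
    else:
        version, build = tag, None
    return ("single", repository, version, build)

# Hypothetical cached image identifiers:
print(classify("quay.io/biocontainers/mulled-v1-abcdefabcdef:2"))
print(classify("quay.io/biocontainers/mulled-v2-0123456789ab:f00dfeedbeef-0"))
print(classify("quay.io/biocontainers/samtools:1.3.1--4"))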
