Remove ToolsController._rerun_tool() API method
nsoranzo committed Feb 11, 2019
1 parent 8a16d68 commit 7940bf3
Showing 2 changed files with 2 additions and 264 deletions.
1 change: 1 addition & 0 deletions lib/galaxy/web/framework/__init__.py
@@ -3,4 +3,5 @@
 """
 
 from . import base
+
 url_for = base.routes.url_for
265 changes: 1 addition & 264 deletions lib/galaxy/webapps/galaxy/api/tools.py
@@ -8,7 +8,6 @@
 from galaxy.tools import global_tool_errors
 from galaxy.util.json import safe_dumps
 from galaxy.util.odict import odict
-from galaxy.visualization.genomes import GenomeRegion
 from galaxy.web import _future_expose_api as expose_api
 from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
 from galaxy.web import _future_expose_api_anonymous_and_sessionless as expose_api_anonymous_and_sessionless
@@ -456,10 +455,9 @@ def create(self, trans, payload, **kwd):
         return self._create(trans, payload, **kwd)
 
     def _create(self, trans, payload, **kwd):
-        # HACK: for now, if action is rerun, rerun tool.
         action = payload.get('action', None)
         if action == 'rerun':
-            return self._rerun_tool(trans, payload, **kwd)
+            raise Exception("'rerun' action has been deprecated")
 
         # -- Execute tool. --
 
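Under this change, an API client that still posts action='rerun' gets a server-side error instead of a rerun. A hedged sketch of such a request (hypothetical host, tool id, and API key; Galaxy's classic key query parameter is assumed for auth):

import requests

# This payload previously dispatched to _rerun_tool(); it now raises
# "'rerun' action has been deprecated" on the server.
response = requests.post(
    "https://galaxy.example.org/api/tools",
    params={"key": "YOUR_API_KEY"},  # assumed API-key auth
    json={"action": "rerun", "tool_id": "cat1"},
)
print(response.status_code, response.text)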
@@ -559,264 +557,3 @@ def _get_tool(self, id, tool_version=None, user=None):
if not tool.allow_user_access(user):
raise exceptions.AuthenticationFailed("Access denied, please login for tool with id '%s'." % id)
return tool

def _rerun_tool(self, trans, payload, **kwargs):
"""
Rerun a tool to produce a new output dataset that corresponds to a
dataset that a user is currently viewing.
"""

#
# TODO: refactor to use same code as run_tool.
#

# Run tool on regions if regions are specified.
run_on_regions = False
regions = payload.get('regions', None)
if regions:
if isinstance(regions, dict):
# Regions is a single region.
regions = [GenomeRegion.from_dict(regions)]
elif isinstance(regions, list):
# There is a list of regions.
regions = [GenomeRegion.from_dict(r) for r in regions]

if len(regions) > 1:
# Sort by chrom name, start so that data is not fetched out of order.
regions = sorted(regions, key=lambda r: (r.chrom.lower(), r.start))

# Merge overlapping regions so that data is not included multiple times.
prev = regions[0]
cur = regions[1]
index = 1
while True:
if cur.chrom == prev.chrom and cur.start <= prev.end:
# Found overlapping regions, so merge cur into prev; use max() so a
# region contained within prev does not shrink it.
prev.end = max(prev.end, cur.end)
del regions[index]
else:
# No overlap, move to next region.
prev = cur
index += 1

# Get next region or exit.
if index == len(regions):
# Done.
break
else:
cur = regions[index]

run_on_regions = True
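The region handling above is a standard sweep over intervals sorted by (chrom, start). A minimal standalone sketch of the same merge, using plain (chrom, start, end) tuples as stand-ins for GenomeRegion objects:

def merge_regions(regions):
    """Merge overlapping (chrom, start, end) tuples so no range is fetched twice."""
    if not regions:
        return []
    regions = sorted(regions, key=lambda r: (r[0].lower(), r[1]))
    merged = [regions[0]]
    for chrom, start, end in regions[1:]:
        prev_chrom, prev_start, prev_end = merged[-1]
        if chrom == prev_chrom and start <= prev_end:
            # Overlap: extend prev; max() keeps contained regions from shrinking it.
            merged[-1] = (prev_chrom, prev_start, max(prev_end, end))
        else:
            merged.append((chrom, start, end))
    return merged

# merge_regions([('chr1', 0, 100), ('chr1', 50, 150), ('chr2', 0, 10)])
# -> [('chr1', 0, 150), ('chr2', 0, 10)]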

# Dataset check.
decoded_dataset_id = self.decode_id(payload.get('target_dataset_id'))
original_dataset = self.hda_manager.get_accessible(decoded_dataset_id, user=trans.user)
original_dataset = self.hda_manager.error_if_uploading(original_dataset)
msg = self.hda_manager.data_conversion_status(original_dataset)
if msg:
return msg

# Set tool parameters--except non-hidden dataset parameters--using a
# combination of the job's previous parameters and incoming parameters.
# Incoming parameters have priority.
#
original_job = self.hda_manager.creating_job(original_dataset)
tool = trans.app.toolbox.get_tool(original_job.tool_id)
if not tool or not tool.allow_user_access(trans.user):
return trans.app.model.Dataset.conversion_messages.NO_TOOL
tool_params = dict([(p.name, p.value) for p in original_job.parameters])

# TODO: rather than set new inputs using a dict of JSON'ed values, unpack parameters and set them using set_param_value below.
# TODO: need to handle updates to conditional parameters; conditional
# params are stored in dicts (and dicts within dicts).
new_inputs = payload['inputs']
tool_params.update(dict([(key, dumps(value)) for key, value in new_inputs.items() if key in tool.inputs and value is not None]))
tool_params = tool.params_from_strings(tool_params, self.app)
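The rebuild above amounts to a filtered dict overlay: incoming values win, but only for inputs the tool declares, and they are JSON-encoded to match the job's stored string parameters. A minimal sketch, with plain dicts and a name set standing in for the job and tool objects:

from json import dumps

def overlay_params(old_params, new_inputs, tool_input_names):
    # Start from the original job's string-encoded parameters and let
    # incoming values win, but only for declared, non-None inputs.
    params = dict(old_params)
    params.update({key: dumps(value)
                   for key, value in new_inputs.items()
                   if key in tool_input_names and value is not None})
    return params

# overlay_params({'a': '1', 'b': '2'}, {'b': 3, 'c': 4}, {'a', 'b'})
# -> {'a': '1', 'b': '3'}  ('c' is not a tool input, so it is ignored)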

#
# If running tool on region, convert input datasets (create indices) so
# that regions of data can be quickly extracted.
#
data_provider_registry = trans.app.data_provider_registry
messages_list = []
if run_on_regions:
for jida in original_job.input_datasets:
input_dataset = jida.dataset
data_provider = data_provider_registry.get_data_provider(trans, original_dataset=input_dataset, source='data')
if data_provider and (not data_provider.converted_dataset or
data_provider.converted_dataset.state != trans.app.model.Dataset.states.OK):
# Can convert but no converted dataset yet, so return a message about why.
data_sources = input_dataset.datatype.data_sources
msg = input_dataset.convert_dataset(trans, data_sources['data'])
if msg is not None:
messages_list.append(msg)

# Return any messages generated during conversions.
return_message = self._get_highest_priority_msg(messages_list)
if return_message:
return return_message

#
# Set target history (the history that tool will use for inputs/outputs).
# If user owns dataset, put new data in original dataset's history; if
# user does not own dataset (and hence is accessing dataset via sharing),
# put new data in user's current history.
#
if original_dataset.history.user == trans.user:
target_history = original_dataset.history
else:
target_history = trans.get_history(create=True)
hda_permissions = trans.app.security_agent.history_get_default_permissions(target_history)

def set_param_value(param_dict, param_name, param_value):
"""
Set new parameter value in a tool's parameter dictionary.
"""

# Recursive function to set param value.
def set_value(param_dict, group_name, group_index, param_name, param_value):
if group_name in param_dict:
param_dict[group_name][group_index][param_name] = param_value
return True
elif param_name in param_dict:
param_dict[param_name] = param_value
return True
else:
# Recursive search.
return_val = False
for value in param_dict.values():
if isinstance(value, dict):
return_val = set_value(value, group_name, group_index, param_name, param_value)
if return_val:
return return_val
return False

# Parse parameter name if necessary.
if param_name.find("|") == -1:
# Non-grouping parameter.
group_name = group_index = None
else:
# Grouping parameter.
group, param_name = param_name.split("|")
index = group.rfind("_")
group_name = group[:index]
group_index = int(group[index + 1:])

return set_value(param_dict, group_name, group_index, param_name, param_value)
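Grouped parameter names encode the group and repeat index before the pipe, so a (hypothetical) name like 'queries_0|input2' targets param_dict['queries'][0]['input2']. A usage sketch, treating set_param_value as a free function:

params = {'input1': None, 'queries': [{'input2': None}]}
set_param_value(params, 'input1', 'hda_1')            # simple top-level parameter
set_param_value(params, 'queries_0|input2', 'hda_2')  # group 'queries', repeat index 0
assert params == {'input1': 'hda_1', 'queries': [{'input2': 'hda_2'}]}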

# Set parameters based on the tool's trackster config.
params_set = {}
for action in tool.trackster_conf.actions:
success = False
for joda in original_job.output_datasets:
if joda.name == action.output_name:
set_param_value(tool_params, action.name, joda.dataset)
params_set[action.name] = True
success = True
break

if not success:
return trans.app.model.Dataset.conversion_messages.ERROR

#
# Set input datasets for tool. If running on regions, extract and use subset
# when possible.
#
if run_on_regions:
regions_str = ",".join([str(r) for r in regions])
for jida in original_job.input_datasets:
# If param set previously by config actions, do nothing.
if jida.name in params_set:
continue

input_dataset = jida.dataset
if input_dataset is None: # optional dataset and dataset wasn't selected
tool_params[jida.name] = None
elif run_on_regions and 'data' in input_dataset.datatype.data_sources:
# Dataset is indexed and hence a subset can be extracted and used
# as input.

# Look for subset.
subset_dataset_association = trans.sa_session.query(trans.app.model.HistoryDatasetAssociationSubset) \
.filter_by(hda=input_dataset, location=regions_str) \
.first()
if subset_dataset_association:
# Data subset exists.
subset_dataset = subset_dataset_association.subset
else:
# Need to create subset.
data_source = input_dataset.datatype.data_sources['data']
input_dataset.get_converted_dataset(trans, data_source)
input_dataset.get_converted_dataset_deps(trans, data_source)

# Create new HDA for input dataset's subset.
new_dataset = trans.app.model.HistoryDatasetAssociation(extension=input_dataset.ext,
dbkey=input_dataset.dbkey,
create_dataset=True,
sa_session=trans.sa_session,
name="Subset [%s] of data %i" %
(regions_str, input_dataset.hid),
visible=False)
target_history.add_dataset(new_dataset)
trans.sa_session.add(new_dataset)
trans.app.security_agent.set_all_dataset_permissions(new_dataset.dataset, hda_permissions)

# Write subset of data to new dataset
data_provider = data_provider_registry.get_data_provider(trans, original_dataset=input_dataset, source='data')
trans.app.object_store.create(new_dataset.dataset)
data_provider.write_data_to_file(regions, new_dataset.file_name)

# TODO: (a) size not working; (b) need to set peek.
new_dataset.set_size()
new_dataset.info = "Data subset for trackster"
new_dataset.set_dataset_state(trans.app.model.Dataset.states.OK)

# Set metadata.
# TODO: set meta internally if dataset is small enough?
trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute(trans.app.datatypes_registry.set_external_metadata_tool,
trans, incoming={'input1': new_dataset},
overwrite=False, job_params={"source": "trackster"})
# Add HDA subset association.
subset_association = trans.app.model.HistoryDatasetAssociationSubset(hda=input_dataset, subset=new_dataset, location=regions_str)
trans.sa_session.add(subset_association)

subset_dataset = new_dataset

trans.sa_session.flush()

# Add dataset to tool's parameters.
if not set_param_value(tool_params, jida.name, subset_dataset):
return {"error": True, "message": "error setting parameter %s" % jida.name}
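The subset handling above is a get-or-create cache keyed on the (input dataset, region string) pair. Factored out, the pattern is roughly the following, with the model class injected and a hypothetical create_subset callback standing in for the HDA-building code:

def get_or_create_subset(sa_session, HDASubset, input_dataset, regions_str, create_subset):
    # Reuse a previously extracted subset for these regions if one is cached.
    assoc = (sa_session.query(HDASubset)
                       .filter_by(hda=input_dataset, location=regions_str)
                       .first())
    if assoc:
        return assoc.subset
    # Cache miss: build the subset and record the association for next time.
    subset = create_subset(input_dataset, regions_str)
    sa_session.add(HDASubset(hda=input_dataset, subset=subset, location=regions_str))
    sa_session.flush()
    return subset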

#
# Execute tool and handle outputs.
#
try:
subset_job, subset_job_outputs = tool.execute(trans, incoming=tool_params,
history=target_history,
job_params={"source": "trackster"})
except Exception as e:
# Lots of things can go wrong when trying to execute tool.
return {"error": True, "message": e.__class__.__name__ + ": " + str(e)}
if run_on_regions:
for output in subset_job_outputs.values():
output.visible = False
trans.sa_session.flush()

#
# Return new track that corresponds to the original dataset.
#
output_name = None
for joda in original_job.output_datasets:
if joda.dataset == original_dataset:
output_name = joda.name
break
for joda in subset_job.output_datasets:
if joda.name == output_name:
output_dataset = joda.dataset

dataset_dict = output_dataset.to_dict()
dataset_dict['id'] = trans.security.encode_id(dataset_dict['id'])
dataset_dict['track_config'] = self.get_new_track_config(trans, output_dataset)
return dataset_dict
