Fix more indentation errors
nsoranzo committed Aug 17, 2017
1 parent 62b478a commit 7552aad
Showing 33 changed files with 135 additions and 160 deletions.
3 changes: 1 addition & 2 deletions lib/galaxy/jobs/actions/post.py
@@ -374,8 +374,7 @@ class ActionBox(object):
"EmailAction": EmailAction,
"DeleteIntermediatesAction": DeleteIntermediatesAction,
"TagDatasetAction": TagDatasetAction,
"RemoveTagDatasetAction": RemoveTagDatasetAction,
}
"RemoveTagDatasetAction": RemoveTagDatasetAction}
public_actions = ['RenameDatasetAction', 'ChangeDatatypeAction',
'ColumnSetAction', 'EmailAction',
'DeleteIntermediatesAction', 'TagDatasetAction',
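
The post.py hunk above folds the dictionary's closing brace onto its last entry instead of giving it a line of its own; the same closing-bracket treatment shows up again below in genome.py, folder_contents.py and buildapp.py. A minimal runnable sketch of the two layouts (the action classes here are hypothetical stand-ins, not Galaxy's ActionBox registry):

    class RenameDatasetAction(object):      # hypothetical stand-ins for the real action classes
        pass

    class RemoveTagDatasetAction(object):
        pass

    # Closing brace on its own line: the layout being removed.
    actions_before = {
        "RenameDatasetAction": RenameDatasetAction,
        "RemoveTagDatasetAction": RemoveTagDatasetAction,
    }

    # Closing brace folded onto the last entry: the layout these files move to.
    actions_after = {
        "RenameDatasetAction": RenameDatasetAction,
        "RemoveTagDatasetAction": RemoveTagDatasetAction}

    assert actions_before == actions_after   # only the layout differs
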
3 changes: 1 addition & 2 deletions lib/galaxy/jobs/error_level.py
@@ -19,7 +19,6 @@ class StdioErrorLevel(object):
@staticmethod
def desc(error_level):
err_msg = "Unknown error"
if (error_level > 0 and
error_level <= StdioErrorLevel.MAX):
if error_level > 0 and error_level <= StdioErrorLevel.MAX:
err_msg = StdioErrorLevel.descs[error_level]
return err_msg
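
The error_level.py hunk drops the parentheses that existed only to allow a line break, since the condition fits on one line. A runnable sketch of the pattern, using an assumed MAX value rather than StdioErrorLevel's real constants (indentation reconstructed, since the rendered diff strips it):

    MAX = 3                   # assumed stand-in for StdioErrorLevel.MAX
    error_level = 2

    # Before: parentheses exist only so the condition can wrap onto two lines.
    if (error_level > 0 and
            error_level <= MAX):
        print("in range (wrapped form)")

    # After: the condition fits on one line, so the wrapping parentheses go.
    if error_level > 0 and error_level <= MAX:
        print("in range (single-line form)")
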
2 changes: 1 addition & 1 deletion lib/galaxy/jobs/handler.py
@@ -400,7 +400,7 @@ def __verify_job_ready(self, job, job_wrapper):
pass # No history, should not happen with an anon user
# Check total walltime limits
if (state == JOB_READY and
"delta" in self.app.job_config.limits.total_walltime):
"delta" in self.app.job_config.limits.total_walltime):
jobs_to_check = self.sa_session.query(model.Job).filter(
model.Job.user_id == job.user.id,
model.Job.update_time >= datetime.datetime.now() -
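
The handler.py change keeps the walltime condition wrapped but pushes its continuation line one extra level in, so it no longer aligns with the body of the if; the exit-code range check in output_checker.py and the isinf() test in xml.py get the same treatment. A runnable sketch with hypothetical stand-in values (indentation reconstructed, since the rendered diff drops it):

    job_is_ready = True
    walltime_limits = {"delta": "24:00:00"}   # hypothetical stand-ins, not Galaxy config

    # The extra indent on the continuation line keeps the condition visually
    # distinct from the suite that follows the colon.
    if (job_is_ready and
            "delta" in walltime_limits):
        print("would compare the user's recent runtime against the limit")
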
31 changes: 14 additions & 17 deletions lib/galaxy/jobs/output_checker.py
@@ -3,6 +3,7 @@
import traceback

from logging import getLogger

log = getLogger(__name__)


@@ -31,8 +32,7 @@ def check_output(tool, stdout, stderr, tool_exit_code, job):
# then we assume that the tool writer overwrote the default
# behavior of just setting an error if there is *anything* on
# stderr.
if (len(tool.stdio_regexes) > 0 or
len(tool.stdio_exit_codes) > 0):
if len(tool.stdio_regexes) > 0 or len(tool.stdio_exit_codes) > 0:
# Check the exit code ranges in the order in which
# they were specified. Each exit_code is a StdioExitCode
# that includes an applicable range. If the exit code was in
@@ -42,23 +42,22 @@ def check_output(tool, stdout, stderr, tool_exit_code, job):
if tool_exit_code is not None:
for stdio_exit_code in tool.stdio_exit_codes:
if (tool_exit_code >= stdio_exit_code.range_start and
tool_exit_code <= stdio_exit_code.range_end):
tool_exit_code <= stdio_exit_code.range_end):
# Tack on a generic description of the code
# plus a specific code description. For example,
# this might prepend "Job 42: Warning (Out of Memory)\n".
code_desc = stdio_exit_code.desc
if (None is code_desc):
if None is code_desc:
code_desc = ""
tool_msg = ("%s: Exit code %d (%s)" % (
StdioErrorLevel.desc(stdio_exit_code.error_level),
tool_exit_code,
code_desc))
StdioErrorLevel.desc(stdio_exit_code.error_level),
tool_exit_code,
code_desc))
log.info("Job %s: %s" % (job.get_id_tag(), tool_msg))
stderr = tool_msg + "\n" + stderr
max_error_level = max(max_error_level,
stdio_exit_code.error_level)
if (max_error_level >=
StdioErrorLevel.FATAL):
if max_error_level >= StdioErrorLevel.FATAL:
break

if max_error_level < StdioErrorLevel.FATAL:
@@ -78,32 +77,30 @@ def check_output(tool, stdout, stderr, tool_exit_code, job):
# o If it was fatal, then we're done - break.
# Repeat the stdout stuff for stderr.
# TODO: Collapse this into a single function.
if (regex.stdout_match):
if regex.stdout_match:
regex_match = re.search(regex.match, stdout,
re.IGNORECASE)
if (regex_match):
if regex_match:
rexmsg = __regex_err_msg(regex_match, regex)
log.info("Job %s: %s"
% (job.get_id_tag(), rexmsg))
stdout = rexmsg + "\n" + stdout
max_error_level = max(max_error_level,
regex.error_level)
if (max_error_level >=
StdioErrorLevel.FATAL):
if max_error_level >= StdioErrorLevel.FATAL:
break

if (regex.stderr_match):
if regex.stderr_match:
regex_match = re.search(regex.match, stderr,
re.IGNORECASE)
if (regex_match):
if regex_match:
rexmsg = __regex_err_msg(regex_match, regex)
log.info("Job %s: %s"
% (job.get_id_tag(), rexmsg))
stderr = rexmsg + "\n" + stderr
max_error_level = max(max_error_level,
regex.error_level)
if (max_error_level >=
StdioErrorLevel.FATAL):
if max_error_level >= StdioErrorLevel.FATAL:
break

# If we encountered a fatal error, then we'll need to set the
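
Most of the output_checker.py hunk is whitespace, but the surrounding comments describe the actual behaviour: each declared exit-code rule carries an inclusive range and an error level, every matching rule raises the running maximum, and a fatal level stops the scan. A simplified runnable sketch of that loop (ExitCodeRule and the level constants are stand-ins, not Galaxy's StdioExitCode or StdioErrorLevel):

    from collections import namedtuple

    # Stand-in for a tool's exit-code declaration: an inclusive range plus the
    # severity to report when the tool's exit code falls inside it.
    ExitCodeRule = namedtuple("ExitCodeRule", "range_start range_end error_level desc")

    NO_ERROR, WARNING, FATAL = 0, 1, 2     # assumed ordering: higher is worse

    def classify(tool_exit_code, rules):
        max_error_level = NO_ERROR
        for rule in rules:
            if rule.range_start <= tool_exit_code <= rule.range_end:
                max_error_level = max(max_error_level, rule.error_level)
                if max_error_level >= FATAL:
                    break              # nothing can outrank fatal, stop scanning
        return max_error_level

    rules = [ExitCodeRule(1, 1, WARNING, "general error"),
             ExitCodeRule(2, 255, FATAL, "hard failure")]
    print(classify(0, rules))    # 0: no rule matches
    print(classify(137, rules))  # 2: fatal range matched
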
15 changes: 7 additions & 8 deletions lib/galaxy/model/__init__.py
@@ -1475,14 +1475,13 @@ def active_datasets_and_roles(self):
if not hasattr(self, '_active_datasets_and_roles'):
db_session = object_session(self)
query = (db_session.query(HistoryDatasetAssociation)
.filter(HistoryDatasetAssociation.table.c.history_id == self.id)
.filter(not_(HistoryDatasetAssociation.deleted))
.order_by(HistoryDatasetAssociation.table.c.hid.asc())
.options(joinedload("dataset"),
joinedload("dataset.actions"),
joinedload("dataset.actions.role"),
joinedload("tags"),
))
.filter(HistoryDatasetAssociation.table.c.history_id == self.id)
.filter(not_(HistoryDatasetAssociation.deleted))
.order_by(HistoryDatasetAssociation.table.c.hid.asc())
.options(joinedload("dataset"),
joinedload("dataset.actions"),
joinedload("dataset.actions.role"),
joinedload("tags")))
self._active_datasets_and_roles = query.all()
return self._active_datasets_and_roles

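
The model/__init__.py hunk (and, further down, the similar query-layout hunks in security/__init__.py, imp_exp/__init__.py and grids.py) only re-indents a chained SQLAlchemy query so each .filter()/.order_by()/.options() call sits at one continuation indent inside the wrapping parentheses. A runnable toy version of that shape against an in-memory SQLite database (Item is a stand-in model, not HistoryDatasetAssociation):

    from sqlalchemy import Boolean, Column, Integer, create_engine, not_
    from sqlalchemy.orm import declarative_base, sessionmaker

    Base = declarative_base()

    class Item(Base):                      # stand-in model, not HistoryDatasetAssociation
        __tablename__ = "item"
        id = Column(Integer, primary_key=True)
        hid = Column(Integer)
        deleted = Column(Boolean, default=False)

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([Item(hid=2), Item(hid=1), Item(hid=3, deleted=True)])
    session.commit()

    # One continuation indent for each chained call, closing parenthesis on the
    # last call: the shape the Galaxy query is re-indented toward.
    query = (session.query(Item)
             .filter(not_(Item.deleted))
             .order_by(Item.hid.asc()))
    print([item.hid for item in query.all()])   # -> [1, 2]
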
8 changes: 4 additions & 4 deletions lib/galaxy/model/tool_shed_install/mapping.py
@@ -111,10 +111,10 @@

mapper(install_model.ToolVersion, install_model.ToolVersion.table,
properties=dict(
parent_tool_association=relation(install_model.ToolVersionAssociation,
primaryjoin=(install_model.ToolVersion.table.c.id == install_model.ToolVersionAssociation.table.c.tool_id)),
child_tool_association=relation(install_model.ToolVersionAssociation,
primaryjoin=(install_model.ToolVersion.table.c.id == install_model.ToolVersionAssociation.table.c.parent_id))))
parent_tool_association=relation(install_model.ToolVersionAssociation,
primaryjoin=(install_model.ToolVersion.table.c.id == install_model.ToolVersionAssociation.table.c.tool_id)),
child_tool_association=relation(install_model.ToolVersionAssociation,
primaryjoin=(install_model.ToolVersion.table.c.id == install_model.ToolVersionAssociation.table.c.parent_id))))

mapper(install_model.ToolVersionAssociation, install_model.ToolVersionAssociation.table)

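
The mapping.py hunk re-indents the keyword arguments nested inside properties=dict(...) so they hang at a single indent under that call rather than lining up with the outer mapper(...) call; the RemoteUser kwargs=dict(...) block in buildapp.py near the end of the diff gets the same fix. A small runnable sketch of that hanging-indent shape (configure() and its options are hypothetical, not Galaxy or SQLAlchemy APIs):

    def configure(name, **options):
        """Hypothetical helper that just records the configuration it is given."""
        return {"name": name, "options": options}

    # Keyword arguments of the nested dict(...) hang one indent level under it,
    # instead of being pushed far right to line up with the outer call.
    tool_version = configure("ToolVersion",
                             properties=dict(
                                 parent_tool_association="join on tool_id",
                                 child_tool_association="join on parent_id"))
    print(tool_version["options"]["properties"]["parent_tool_association"])
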
22 changes: 10 additions & 12 deletions lib/galaxy/security/__init__.py
@@ -626,10 +626,10 @@ def get_accessible_libraries(self, trans, user):
library_access_action = self.permitted_actions.LIBRARY_ACCESS.action
restricted_library_ids = [lp.library_id for lp in trans.sa_session.query(trans.model.LibraryPermissions)
.filter(trans.model.LibraryPermissions.table.c.action == library_access_action).distinct()]
accessible_restricted_library_ids = [lp.library_id for lp in trans.sa_session.query(
trans.model.LibraryPermissions).filter(
and_(trans.model.LibraryPermissions.table.c.action == library_access_action,
trans.model.LibraryPermissions.table.c.role_id.in_(current_user_role_ids)))]
accessible_restricted_library_ids = [lp.library_id for lp in trans.sa_session.query(trans.model.LibraryPermissions)
.filter(and_(
trans.model.LibraryPermissions.table.c.action == library_access_action,
trans.model.LibraryPermissions.table.c.role_id.in_(current_user_role_ids)))]
# Filter to get libraries accessible by the current user. Get both
# public libraries and restricted libraries accessible by the current user.
for library in trans.sa_session.query(trans.model.Library) \
@@ -946,14 +946,12 @@ def get_accessible_request_types(self, trans, user):
accessible_request_types = []
current_user_role_ids = [role.id for role in user.all_roles()]
request_type_access_action = self.permitted_actions.REQUEST_TYPE_ACCESS.action
restricted_request_type_ids = [rtp.request_type_id for rtp in trans.sa_session.query(
trans.model.RequestTypePermissions).filter(
trans.model.RequestTypePermissions.table.c.action == request_type_access_action).distinct()
]
accessible_restricted_request_type_ids = [rtp.request_type_id for rtp in trans.sa_session.query(
trans.model.RequestTypePermissions).filter(
and_(trans.model.RequestTypePermissions.table.c.action == request_type_access_action,
trans.model.RequestTypePermissions.table.c.role_id.in_(current_user_role_ids)))]
restricted_request_type_ids = [rtp.request_type_id for rtp in trans.sa_session.query(trans.model.RequestTypePermissions)
.filter(trans.model.RequestTypePermissions.table.c.action == request_type_access_action).distinct()]
accessible_restricted_request_type_ids = [rtp.request_type_id for rtp in trans.sa_session.query(trans.model.RequestTypePermissions)
.filter(and_(
trans.model.RequestTypePermissions.table.c.action == request_type_access_action,
trans.model.RequestTypePermissions.table.c.role_id.in_(current_user_role_ids)))]
# Filter to get libraries accessible by the current user. Get both
# public libraries and restricted libraries accessible by the current user.
for request_type in trans.sa_session.query(trans.model.RequestType) \
8 changes: 4 additions & 4 deletions lib/galaxy/tools/imp_exp/__init__.py
@@ -217,8 +217,8 @@ def as_hda(obj_dct):
""" Hook to 'decode' an HDA; method uses history and HID to get the HDA represented by
the encoded object. This only works because HDAs are created above. """
if obj_dct.get('__HistoryDatasetAssociation__', False):
return self.sa_session.query(model.HistoryDatasetAssociation
).filter_by(history=new_history, hid=obj_dct['hid']).first()
return self.sa_session.query(model.HistoryDatasetAssociation) \
.filter_by(history=new_history, hid=obj_dct['hid']).first()
return obj_dct
jobs_attrs = loads(jobs_attr_str, object_hook=as_hda)

@@ -276,8 +276,8 @@ def default(self, obj):
# Connect jobs to output datasets.
for output_hid in job_attrs['output_datasets']:
# print "%s job has output dataset %i" % (imported_job.id, output_hid)
output_hda = self.sa_session.query(model.HistoryDatasetAssociation
).filter_by(history=new_history, hid=output_hid).first()
output_hda = self.sa_session.query(model.HistoryDatasetAssociation) \
.filter_by(history=new_history, hid=output_hid).first()
if output_hda:
imported_job.add_output_dataset(output_hda.name, output_hda)

2 changes: 1 addition & 1 deletion lib/galaxy/tools/parser/xml.py
@@ -716,7 +716,7 @@ def parse_stdio_exit_codes(self, stdio_elem):
# the start must be -inf and the end must be +inf.
# So at least warn about this situation:
if (isinf(exit_code.range_start) and
isinf(exit_code.range_end)):
isinf(exit_code.range_end)):
log.warning("Tool exit_code range %s will match on " +
"all exit codes" % code_range)
self.stdio_exit_codes.append(exit_code)
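
One context line in the xml.py hunk is worth a note even though this commit leaves it untouched: in log.warning("Tool exit_code range %s will match on " + "all exit codes" % code_range), the % operator binds tighter than +, so the interpolation is applied only to the second literal, which has no placeholder. A runnable sketch of a form that formats the whole message (the code_range value is hypothetical):

    import logging

    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger(__name__)

    code_range = ":"          # hypothetical exit-code range string

    # Passing the argument to the logger defers formatting and applies the
    # placeholder to the complete message.
    log.warning("Tool exit_code range %s will match on all exit codes", code_range)
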
12 changes: 4 additions & 8 deletions lib/galaxy/visualization/data_providers/genome.py
@@ -328,8 +328,7 @@ def is_float(column_text):
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c5'
}]
'tool_exp_name': 'c5'}]

return filters

@@ -838,8 +837,7 @@ def get_filters(self):
filters = []
filters.append({'name': 'Mapping Quality',
'type': 'number',
'index': filter_col}
)
'index': filter_col})
return filters

def write_data_to_file(self, regions, filename):
@@ -1028,8 +1026,7 @@ def _nth_read_iterator(read_iterator, threshold):
qname,
[pair['start'], pair['end'], pair['cigar'], pair['strand'], pair['seq']],
[read.pos, read.pos + read_len, read.cigar, strand, seq],
None, [pair['mapq'], read.mapq]
])
None, [pair['mapq'], read.mapq]])
del paired_pending[qname]
else:
# Insert first of pair.
@@ -1674,8 +1671,7 @@ def package_gff_feature(feature, no_detail=False, filter_cols=[]):
# No notion of thick start, end in GFF, so make everything
# thick.
feature.start,
feature.end
]
feature.end]

# HACK: ignore interval with name 'transcript' from feature.
# Cufflinks puts this interval in each of its transcripts,
3 changes: 1 addition & 2 deletions lib/galaxy/web/framework/helpers/grids.py
@@ -654,8 +654,7 @@ def get_foreign_key(source_class, target_class):
# Subquery to get average rating for each item.
ave_rating_subquery = trans.sa_session.query(fk_col,
func.avg(item_rating_assoc_class.table.c.rating).label('avg_rating')) \
.group_by(fk_col) \
.subquery()
.group_by(fk_col).subquery()
# Integrate subquery into main query.
query = query.outerjoin((ave_rating_subquery, referent_col == ave_rating_subquery.columns[fk_col.name]))
# Sort using subquery results; use coalesce to avoid null values.
6 changes: 2 additions & 4 deletions lib/galaxy/webapps/galaxy/api/folder_contents.py
@@ -109,8 +109,7 @@ def index(self, trans, folder_id, **kwd):
is_unrestricted=is_unrestricted,
is_private=is_private,
can_manage=can_manage,
file_size=nice_size
))
file_size=nice_size))
if content_item.library_dataset_dataset_association.message:
return_item.update(dict(message=content_item.library_dataset_dataset_association.message))

@@ -120,8 +119,7 @@ def index(self, trans, folder_id, **kwd):
name=content_item.name,
update_time=update_time,
create_time=create_time,
deleted=content_item.deleted
))
deleted=content_item.deleted))
folder_contents.append(return_item)

# Return the reversed path so it starts with the library node.
17 changes: 8 additions & 9 deletions lib/galaxy/webapps/galaxy/buildapp.py
@@ -199,8 +199,7 @@ def populate_api_routes(webapp, app):
name_prefix="history_",
controller='history_contents',
path_prefix='/api/histories/{history_id}/contents',
parent_resources=dict(member_name='history', collection_name='histories'),
)
parent_resources=dict(member_name='history', collection_name='histories'))
# Legacy access to HDA details via histories/{history_id}/contents/{hda_id}
webapp.mapper.resource('content',
'contents',
@@ -981,13 +980,13 @@ def wrap_in_middleware(app, global_conf, application_stack, **local_conf):
from galaxy.web.framework.middleware.remoteuser import RemoteUser
app = wrap_if_allowed(app, stack, RemoteUser,
kwargs=dict(
maildomain=conf.get('remote_user_maildomain', None),
display_servers=util.listify(conf.get('display_servers', '')),
single_user=single_user,
admin_users=conf.get('admin_users', '').split(','),
remote_user_header=conf.get('remote_user_header', 'HTTP_REMOTE_USER'),
remote_user_secret_header=conf.get('remote_user_secret', None),
normalize_remote_user_email=conf.get('normalize_remote_user_email', False)))
maildomain=conf.get('remote_user_maildomain', None),
display_servers=util.listify(conf.get('display_servers', '')),
single_user=single_user,
admin_users=conf.get('admin_users', '').split(','),
remote_user_header=conf.get('remote_user_header', 'HTTP_REMOTE_USER'),
remote_user_secret_header=conf.get('remote_user_secret', None),
normalize_remote_user_email=conf.get('normalize_remote_user_email', False)))
# The recursive middleware allows for including requests in other
# requests or forwarding of requests, all on the server side.
if asbool(conf.get('use_recursive', True)):
