"""
Galaxy data model classes
Naming: try to use class names that have a distinct plural form so that
the relationship cardinalities are obvious (e.g. prefer Dataset to Data)
"""
from galaxy import eggs
eggs.require("pexpect")
import codecs
import errno
import logging
import operator
import os
import pexpect
import json
import socket
import time
import numbers
from datetime import datetime, timedelta
from uuid import UUID, uuid4
from string import Template
from itertools import chain, ifilter
import galaxy.datatypes
import galaxy.datatypes.registry
import galaxy.security.passwords
from galaxy.datatypes.metadata import MetadataCollection
from galaxy.model.item_attrs import Dictifiable, UsesAnnotations
import galaxy.model.orm.now
from galaxy.security import get_permitted_actions
from galaxy.util import is_multi_byte, nice_size, Params, restore_text, send_mail
from galaxy.util import ready_name_for_url, unique_id
from galaxy.util.bunch import Bunch
from galaxy.util.hash_util import new_secure_hash
from galaxy.util.directory_hash import directory_hash_id
from galaxy.util.sanitize_html import sanitize_html
from galaxy.web.framework.helpers import to_unicode
from galaxy.web.form_builder import (AddressField, CheckboxField, HistoryField,
PasswordField, SelectField, TextArea, TextField, WorkflowField,
WorkflowMappingField)
from galaxy.model.orm import and_, or_
from sqlalchemy.orm import object_session
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.expression import func
from sqlalchemy import not_
log = logging.getLogger( __name__ )
datatypes_registry = galaxy.datatypes.registry.Registry()
# A loaded default registry is required for unit tests.
datatypes_registry.load_datatypes()
# When constructing filters with IN for a fixed set of ids, this is the
# maximum number of items to place in the IN statement. Different databases
# have different limits, so it is best not to leave this unbounded - filter
# in Python instead when over this limit.
MAX_IN_FILTER_LENGTH = 100
class NoConverterException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ConverterDependencyException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def set_datatypes_registry( d_registry ):
"""
Set up datatypes_registry
"""
global datatypes_registry
datatypes_registry = d_registry
class HasName:
def get_display_name( self ):
"""
        These objects have a name attribute that can be either a string or a
        unicode object. If it is a string, convert it to unicode assuming
        'utf-8' encoding.
"""
name = self.name
if isinstance(name, str):
name = unicode(name, 'utf-8')
return name
class HasJobMetrics:
def _init_metrics( self ):
self.text_metrics = []
self.numeric_metrics = []
def add_metric( self, plugin, metric_name, metric_value ):
if isinstance( plugin, str ):
plugin = unicode( plugin, 'utf-8' )
if isinstance( metric_name, str ):
metric_name = unicode( metric_name, 'utf-8' )
if isinstance( metric_value, numbers.Number ):
metric = self._numeric_metric( plugin, metric_name, metric_value )
self.numeric_metrics.append( metric )
else:
if isinstance( metric_value, str ):
metric_value = unicode( metric_value, 'utf-8' )
if len( metric_value ) > 1022:
                # Truncate these values - not needed with sqlite,
                # but other backends may need it.
metric_value = metric_value[ :1022 ]
metric = self._text_metric( plugin, metric_name, metric_value )
self.text_metrics.append( metric )
@property
def metrics( self ):
# TODO: Make iterable, concatenate with chain
return self.text_metrics + self.numeric_metrics
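    # Illustrative usage (a sketch in comments, not executed here): numeric
    # values become numeric metrics, everything else is stored as text:
    #
    #     job.add_metric( 'core', 'galaxy_slots', 4 )       # -> numeric_metrics
    #     job.add_metric( 'env', 'HOSTNAME', 'worker-1' )   # -> text_metrics
    #     len( job.metrics )                                # -> 2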
class User( object, Dictifiable ):
    """
    Data for a Galaxy user or admin and relations to their
    histories, credentials, and roles.
    """
    use_pbkdf2 = True
# attributes that will be accessed and returned when calling to_dict( view='collection' )
dict_collection_visible_keys = ( 'id', 'email', 'username' )
# attributes that will be accessed and returned when calling to_dict( view='element' )
dict_element_visible_keys = ( 'id', 'email', 'username', 'total_disk_usage', 'nice_total_disk_usage' )
def __init__( self, email=None, password=None ):
self.email = email
self.password = password
self.external = False
self.deleted = False
self.purged = False
self.active = False
self.activation_token = None
self.username = None
# Relationships
self.histories = []
self.credentials = []
#? self.roles = []
def set_password_cleartext( self, cleartext ):
"""
Set user password to the digest of `cleartext`.
"""
if User.use_pbkdf2:
self.password = galaxy.security.passwords.hash_password( cleartext )
else:
self.password = new_secure_hash( text_type=cleartext )
def check_password( self, cleartext ):
"""
Check if `cleartext` matches user password when hashed.
"""
return galaxy.security.passwords.check_password( cleartext, self.password )
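    # Illustrative usage (a sketch, not executed here; assumes the default
    # PBKDF2 scheme): a password set in cleartext verifies afterwards:
    #
    #     user = User( 'foo@example.com' )
    #     user.set_password_cleartext( 's3cret' )
    #     user.check_password( 's3cret' )   # -> True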
def all_roles( self ):
"""
Return a unique list of Roles associated with this user or any of their groups.
"""
roles = [ ura.role for ura in self.roles ]
for group in [ uga.group for uga in self.groups ]:
for role in [ gra.role for gra in group.roles ]:
if role not in roles:
roles.append( role )
return roles
def get_disk_usage( self, nice_size=False ):
"""
Return byte count of disk space used by user or a human-readable
string if `nice_size` is `True`.
"""
rval = 0
if self.disk_usage is not None:
rval = self.disk_usage
if nice_size:
rval = galaxy.datatypes.data.nice_size( rval )
return rval
def set_disk_usage( self, bytes ):
"""
Manually set the disk space used by a user to `bytes`.
"""
self.disk_usage = bytes
total_disk_usage = property( get_disk_usage, set_disk_usage )
@property
def nice_total_disk_usage( self ):
"""
Return byte count of disk space used in a human-readable string.
"""
return self.get_disk_usage( nice_size=True )
def calculate_disk_usage( self ):
"""
Return byte count total of disk space used by all non-purged, non-library
HDAs in non-purged histories.
"""
# maintain a list so that we don't double count
dataset_ids = []
total = 0
        # the result set can be huge and exhaust memory, so we avoid the
        # eager-loading mappers
db_session = object_session( self )
for history in db_session.query( History ).enable_eagerloads( False ).filter_by( user_id=self.id, purged=False ).yield_per( 1000 ):
for hda in db_session.query( HistoryDatasetAssociation ).enable_eagerloads( False ).filter_by( history_id=history.id, purged=False ).yield_per( 1000 ):
#TODO: def hda.counts_toward_disk_usage():
# return ( not self.dataset.purged and not self.dataset.library_associations )
                if hda.dataset.id not in dataset_ids and not hda.dataset.purged and not hda.dataset.library_associations:
dataset_ids.append( hda.dataset.id )
total += hda.dataset.get_total_size()
return total
@staticmethod
def user_template_environment( user ):
"""
>>> env = User.user_template_environment(None)
>>> env['__user_email__']
'Anonymous'
>>> env['__user_id__']
'Anonymous'
>>> user = User('foo@example.com')
>>> user.id = 6
>>> user.username = 'foo2'
>>> env = User.user_template_environment(user)
>>> env['__user_id__']
'6'
>>> env['__user_name__']
'foo2'
"""
if user:
user_id = '%d' % user.id
user_email = str( user.email )
user_name = str( user.username )
else:
user = None
user_id = 'Anonymous'
user_email = 'Anonymous'
user_name = 'Anonymous'
environment = {}
environment[ '__user__' ] = user
environment[ '__user_id__' ] = environment[ 'userId' ] = user_id
environment[ '__user_email__' ] = environment[ 'userEmail' ] = user_email
environment[ '__user_name__' ] = user_name
return environment
@staticmethod
def expand_user_properties( user, in_string ):
"""
"""
environment = User.user_template_environment( user )
return Template( in_string ).safe_substitute( environment )
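    # Illustrative (a sketch mirroring the doctests above, not executed):
    #
    #     >>> user = User( 'foo@example.com' )
    #     >>> user.id = 6
    #     >>> user.username = 'foo2'
    #     >>> User.expand_user_properties( user, '${__user_name__}-run' )
    #     'foo2-run'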
class PasswordResetToken( object ):
def __init__( self, user, token=None):
if token:
self.token = token
else:
self.token = unique_id()
self.user = user
self.expiration_time = datetime.now() + timedelta(hours=24)
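    # Illustrative (a sketch, not executed): a reset flow builds a token
    # that expires 24 hours after creation:
    #
    #     prt = PasswordResetToken( user )
    #     prt.token             # randomly generated unique id
    #     prt.expiration_time   # ~24 hours from now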
class BaseJobMetric( object ):
def __init__( self, plugin, metric_name, metric_value ):
self.plugin = plugin
self.metric_name = metric_name
self.metric_value = metric_value
class JobMetricText( BaseJobMetric ):
pass
class JobMetricNumeric( BaseJobMetric ):
pass
class TaskMetricText( BaseJobMetric ):
pass
class TaskMetricNumeric( BaseJobMetric ):
pass
class Job( object, HasJobMetrics, Dictifiable ):
    """
    A job represents a request to run a tool given input datasets, tool
    parameters, and output datasets.
    """
    dict_collection_visible_keys = [ 'id', 'state', 'exit_code', 'update_time', 'create_time' ]
    dict_element_visible_keys = [ 'id', 'state', 'exit_code', 'update_time', 'create_time' ]
_numeric_metric = JobMetricNumeric
_text_metric = JobMetricText
states = Bunch( NEW = 'new',
RESUBMITTED = 'resubmitted',
UPLOAD = 'upload',
WAITING = 'waiting',
QUEUED = 'queued',
RUNNING = 'running',
OK = 'ok',
ERROR = 'error',
PAUSED = 'paused',
DELETED = 'deleted',
DELETED_NEW = 'deleted_new' )
# Please include an accessor (get/set pair) for any new columns/members.
def __init__( self ):
self.session_id = None
self.user_id = None
self.tool_id = None
self.tool_version = None
self.command_line = None
self.param_filename = None
self.parameters = []
self.input_datasets = []
self.output_datasets = []
self.input_dataset_collections = []
self.output_dataset_collection_instances = []
self.output_dataset_collections = []
self.input_library_datasets = []
self.output_library_datasets = []
self.state = Job.states.NEW
self.info = None
self.job_runner_name = None
self.job_runner_external_id = None
self.destination_id = None
self.destination_params = None
self.post_job_actions = []
self.imported = False
self.handler = None
self.exit_code = None
self._init_metrics()
self.state_history.append( JobStateHistory( self ) )
@property
def finished( self ):
states = self.states
return self.state in [
states.OK,
states.ERROR,
states.DELETED,
states.DELETED_NEW,
]
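    # Illustrative (a sketch, assuming a SQLAlchemy-mapped instance so that
    # state_history is available):
    #
    #     job.set_state( Job.states.OK )
    #     job.finished   # -> True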
# TODO: Add accessors for members defined in SQL Alchemy for the Job table and
# for the mapper defined to the Job table.
def get_external_output_metadata( self ):
"""
The external_output_metadata is currently a reference from Job to
JobExternalOutputMetadata. It exists for a job but not a task.
"""
return self.external_output_metadata
def get_session_id( self ):
return self.session_id
def get_user_id( self ):
return self.user_id
def get_tool_id( self ):
return self.tool_id
def get_tool_version( self ):
return self.tool_version
def get_command_line( self ):
return self.command_line
def get_param_filename( self ):
return self.param_filename
def get_parameters( self ):
return self.parameters
def get_input_datasets( self ):
return self.input_datasets
def get_output_datasets( self ):
return self.output_datasets
def get_input_library_datasets( self ):
return self.input_library_datasets
def get_output_library_datasets( self ):
return self.output_library_datasets
def get_state( self ):
return self.state
def get_info( self ):
return self.info
def get_job_runner_name( self ):
# This differs from the Task class in that job_runner_name is
# accessed instead of task_runner_name. Note that the field
# runner_name is not the same thing.
return self.job_runner_name
def get_job_runner_external_id( self ):
        # This differs from the Task class only in the member accessed:
return self.job_runner_external_id
def get_post_job_actions( self ):
return self.post_job_actions
def get_imported( self ):
return self.imported
def get_handler( self ):
return self.handler
def get_params( self ):
return self.params
def get_user( self ):
# This is defined in the SQL Alchemy mapper as a relation to the User.
return self.user
def get_id( self ):
        # This is defined in SQLAlchemy's Job table (and not in the model).
return self.id
def get_tasks( self ):
        # The tasks member is part of a reference in the SQL Alchemy schema:
return self.tasks
def get_id_tag( self ):
"""
Return a tag that can be useful in identifying a Job.
This returns the Job's get_id
"""
return "%s" % self.id;
def set_session_id( self, session_id ):
self.session_id = session_id
def set_user_id( self, user_id ):
self.user_id = user_id
def set_tool_id( self, tool_id ):
self.tool_id = tool_id
def set_tool_version( self, tool_version ):
self.tool_version = tool_version
def set_command_line( self, command_line ):
self.command_line = command_line
def set_param_filename( self, param_filename ):
self.param_filename = param_filename
def set_parameters( self, parameters ):
self.parameters = parameters
def set_input_datasets( self, input_datasets ):
self.input_datasets = input_datasets
def set_output_datasets( self, output_datasets ):
self.output_datasets = output_datasets
def set_input_library_datasets( self, input_library_datasets ):
self.input_library_datasets = input_library_datasets
def set_output_library_datasets( self, output_library_datasets ):
self.output_library_datasets = output_library_datasets
def set_info( self, info ):
self.info = info
def set_runner_name( self, job_runner_name ):
self.job_runner_name = job_runner_name
def get_job( self ):
# Added so job and task have same interface (.get_job() ) to get at
# underlying job object.
return self
def set_runner_external_id( self, job_runner_external_id ):
self.job_runner_external_id = job_runner_external_id
def set_post_job_actions( self, post_job_actions ):
self.post_job_actions = post_job_actions
def set_imported( self, imported ):
self.imported = imported
def set_handler( self, handler ):
self.handler = handler
def set_params( self, params ):
self.params = params
def add_parameter( self, name, value ):
self.parameters.append( JobParameter( name, value ) )
def add_input_dataset( self, name, dataset ):
self.input_datasets.append( JobToInputDatasetAssociation( name, dataset ) )
def add_output_dataset( self, name, dataset ):
self.output_datasets.append( JobToOutputDatasetAssociation( name, dataset ) )
def add_input_dataset_collection( self, name, dataset ):
self.input_dataset_collections.append( JobToInputDatasetCollectionAssociation( name, dataset ) )
def add_output_dataset_collection( self, name, dataset_collection_instance ):
self.output_dataset_collection_instances.append( JobToOutputDatasetCollectionAssociation( name, dataset_collection_instance ) )
def add_implicit_output_dataset_collection( self, name, dataset_collection ):
self.output_dataset_collections.append( JobToImplicitOutputDatasetCollectionAssociation( name, dataset_collection ) )
def add_input_library_dataset( self, name, dataset ):
self.input_library_datasets.append( JobToInputLibraryDatasetAssociation( name, dataset ) )
def add_output_library_dataset( self, name, dataset ):
self.output_library_datasets.append( JobToOutputLibraryDatasetAssociation( name, dataset ) )
def add_post_job_action(self, pja):
self.post_job_actions.append( PostJobActionAssociation( pja, self ) )
def set_state( self, state ):
"""
Save state history
"""
self.state = state
self.state_history.append( JobStateHistory( self ) )
def get_param_values( self, app, ignore_errors=False ):
"""
Read encoded parameter values from the database and turn back into a
dict of tool parameter values.
"""
param_dict = self.raw_param_dict()
tool = app.toolbox.get_tool( self.tool_id )
param_dict = tool.params_from_strings( param_dict, app, ignore_errors=ignore_errors )
return param_dict
def raw_param_dict( self ):
param_dict = dict( [ ( p.name, p.value ) for p in self.parameters ] )
return param_dict
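    # Illustrative (a sketch, not executed): parameters are simple
    # name/value pairs, so the raw dict round-trips directly:
    #
    #     job.add_parameter( 'threshold', '0.05' )
    #     job.raw_param_dict()   # -> { 'threshold': '0.05' }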
def check_if_output_datasets_deleted( self ):
"""
Return true if all of the output datasets associated with this job are
in the deleted state
"""
for dataset_assoc in self.output_datasets:
dataset = dataset_assoc.dataset
# only the originator of the job can delete a dataset to cause
# cancellation of the job, no need to loop through history_associations
if not dataset.deleted:
return False
return True
def mark_deleted( self, track_jobs_in_database=False ):
"""
Mark this job as deleted, and mark any output datasets as discarded.
"""
if self.finished:
# Do not modify the state/outputs of jobs that are already terminal
return
if track_jobs_in_database:
self.state = Job.states.DELETED_NEW
else:
self.state = Job.states.DELETED
self.info = "Job output deleted by user before job completed."
for dataset_assoc in self.output_datasets:
dataset = dataset_assoc.dataset
dataset.deleted = True
dataset.state = dataset.states.DISCARDED
for dataset in dataset.dataset.history_associations:
# propagate info across shared datasets
dataset.deleted = True
dataset.blurb = 'deleted'
dataset.peek = 'Job deleted'
dataset.info = 'Job output deleted by user before job completed'
def to_dict( self, view='collection', system_details=False ):
rval = super( Job, self ).to_dict( view=view )
rval['tool_id'] = self.tool_id
if system_details:
# System level details that only admins should have.
rval['external_id'] = self.job_runner_external_id
rval['command_line'] = self.command_line
if view == 'element':
param_dict = dict( [ ( p.name, p.value ) for p in self.parameters ] )
rval['params'] = param_dict
input_dict = {}
for i in self.input_datasets:
if i.dataset is not None:
input_dict[i.name] = {"id" : i.dataset.id, "src" : "hda"}
for i in self.input_library_datasets:
if i.dataset is not None:
input_dict[i.name] = {"id" : i.dataset.id, "src" : "ldda"}
for k in input_dict:
if k in param_dict:
del param_dict[k]
rval['inputs'] = input_dict
output_dict = {}
for i in self.output_datasets:
if i.dataset is not None:
output_dict[i.name] = {"id" : i.dataset.id, "src" : "hda"}
for i in self.output_library_datasets:
if i.dataset is not None:
output_dict[i.name] = {"id" : i.dataset.id, "src" : "ldda"}
rval['outputs'] = output_dict
return rval
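    # Illustrative shape of to_dict( view='element' ) output (ids and names
    # below are examples only):
    #
    #     { 'id': 42, 'state': 'ok', 'exit_code': 0,
    #       'params': { 'threshold': '0.05' },
    #       'inputs': { 'input1': { 'id': 101, 'src': 'hda' } },
    #       'outputs': { 'out_file1': { 'id': 202, 'src': 'hda' } }, ... }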
def set_final_state( self, final_state ):
self.set_state( final_state )
if self.workflow_invocation_step:
self.workflow_invocation_step.update()
class Task( object, HasJobMetrics ):
"""
A task represents a single component of a job.
"""
_numeric_metric = TaskMetricNumeric
_text_metric = TaskMetricText
states = Bunch( NEW = 'new',
WAITING = 'waiting',
QUEUED = 'queued',
RUNNING = 'running',
OK = 'ok',
ERROR = 'error',
DELETED = 'deleted' )
# Please include an accessor (get/set pair) for any new columns/members.
def __init__( self, job, working_directory, prepare_files_cmd ):
self.command_line = None
self.parameters = []
self.state = Task.states.NEW
self.info = None
self.working_directory = working_directory
self.task_runner_name = None
self.task_runner_external_id = None
self.job = job
self.stdout = ""
self.stderr = ""
self.exit_code = None
self.prepare_input_files_cmd = prepare_files_cmd
self._init_metrics()
def get_param_values( self, app ):
"""
Read encoded parameter values from the database and turn back into a
dict of tool parameter values.
"""
param_dict = dict( [ ( p.name, p.value ) for p in self.parent_job.parameters ] )
tool = app.toolbox.get_tool( self.tool_id )
param_dict = tool.params_from_strings( param_dict, app )
return param_dict
def get_id( self ):
# This is defined in the SQL Alchemy schema:
return self.id
def get_id_tag( self ):
"""
Return an id tag suitable for identifying the task.
        This combines the task's job id and the task's own id in the form
        '<job_id>_<task_id>'.
"""
return "%s_%s" % ( self.job.get_id(), self.get_id() )
def get_command_line( self ):
return self.command_line
def get_parameters( self ):
return self.parameters
def get_state( self ):
return self.state
def get_info( self ):
return self.info
def get_working_directory( self ):
return self.working_directory
def get_task_runner_name( self ):
return self.task_runner_name
def get_task_runner_external_id( self ):
return self.task_runner_external_id
def get_job( self ):
return self.job
def get_stdout( self ):
return self.stdout
def get_stderr( self ):
return self.stderr
def get_prepare_input_files_cmd( self ):
return self.prepare_input_files_cmd
# The following accessors are for members that are in the Job class but
# not in the Task class. So they can either refer to the parent Job
# or return None, depending on whether Tasks need to point to the parent
# (e.g., for a session) or never use the member (e.g., external output
    # metadata). These can be filled in as needed.
def get_external_output_metadata( self ):
"""
The external_output_metadata is currently a backref to
JobExternalOutputMetadata. It exists for a job but not a task,
and when a task is cancelled its corresponding parent Job will
be cancelled. So None is returned now, but that could be changed
to self.get_job().get_external_output_metadata().
"""
return None
def get_job_runner_name( self ):
"""
Since runners currently access Tasks the same way they access Jobs,
this method just refers to *this* instance's runner.
"""
return self.task_runner_name
def get_job_runner_external_id( self ):
"""
Runners will use the same methods to get information about the Task
class as they will about the Job class, so this method just returns
the task's external id.
"""
# TODO: Merge into get_runner_external_id.
return self.task_runner_external_id
def get_session_id( self ):
        # A Task shares its parent Job's galaxy session, so return the
        # Job's session id.
return self.get_job().get_session_id()
def set_id( self, id ):
        # This is defined in SQLAlchemy's mapper and not here.
# This should never be called.
self.id = id
def set_command_line( self, command_line ):
self.command_line = command_line
def set_parameters( self, parameters ):
self.parameters = parameters
def set_state( self, state ):
self.state = state
def set_info( self, info ):
self.info = info
def set_working_directory( self, working_directory ):
self.working_directory = working_directory
def set_task_runner_name( self, task_runner_name ):
self.task_runner_name = task_runner_name
def set_job_runner_external_id( self, task_runner_external_id ):
# This method is available for runners that do not want/need to
# differentiate between the kinds of Runnable things (Jobs and Tasks)
# that they're using.
log.debug( "Task %d: Set external id to %s"
% ( self.id, task_runner_external_id ) )
self.task_runner_external_id = task_runner_external_id
def set_task_runner_external_id( self, task_runner_external_id ):
self.task_runner_external_id = task_runner_external_id
def set_job( self, job ):
self.job = job
def set_stdout( self, stdout ):
self.stdout = stdout
def set_stderr( self, stderr ):
self.stderr = stderr
def set_prepare_input_files_cmd( self, prepare_input_files_cmd ):
self.prepare_input_files_cmd = prepare_input_files_cmd
class JobParameter( object ):
def __init__( self, name, value ):
self.name = name
self.value = value
class JobToInputDatasetAssociation( object ):
def __init__( self, name, dataset ):
self.name = name
self.dataset = dataset
class JobToOutputDatasetAssociation( object ):
def __init__( self, name, dataset ):
self.name = name
self.dataset = dataset
class JobToInputDatasetCollectionAssociation( object ):
def __init__( self, name, dataset ):
self.name = name
self.dataset = dataset
# Many jobs may map to one HistoryDatasetCollection using these for a given
# tool output (if mapping over an input collection).
class JobToOutputDatasetCollectionAssociation( object ):
def __init__( self, name, dataset_collection_instance ):
self.name = name
self.dataset_collection_instance = dataset_collection_instance
# A DatasetCollection will be mapped to at most one job per tool output
# using these. (You can think of many of these models as going into the
# creation of a JobToOutputDatasetCollectionAssociation.)
class JobToImplicitOutputDatasetCollectionAssociation( object ):
def __init__( self, name, dataset_collection ):
self.name = name
self.dataset_collection = dataset_collection
class JobToInputLibraryDatasetAssociation( object ):
def __init__( self, name, dataset ):
self.name = name
self.dataset = dataset
class JobToOutputLibraryDatasetAssociation( object ):
def __init__( self, name, dataset ):
self.name = name
self.dataset = dataset
class JobStateHistory( object ):
def __init__( self, job ):
self.job = job
self.state = job.state
self.info = job.info
class ImplicitlyCreatedDatasetCollectionInput( object ):
def __init__( self, name, input_dataset_collection ):
self.name = name
self.input_dataset_collection = input_dataset_collection
class PostJobAction( object ):
def __init__( self, action_type, workflow_step, output_name = None, action_arguments = None):
self.action_type = action_type
self.output_name = output_name
self.action_arguments = action_arguments
self.workflow_step = workflow_step
class PostJobActionAssociation( object ):
def __init__(self, pja, job):
self.job = job
self.post_job_action = pja
class JobExternalOutputMetadata( object ):
def __init__( self, job = None, dataset = None ):
self.job = job
if isinstance( dataset, galaxy.model.HistoryDatasetAssociation ):
self.history_dataset_association = dataset
elif isinstance( dataset, galaxy.model.LibraryDatasetDatasetAssociation ):
self.library_dataset_dataset_association = dataset
@property
def dataset( self ):
if self.history_dataset_association:
return self.history_dataset_association
elif self.library_dataset_dataset_association:
return self.library_dataset_dataset_association
return None
class JobExportHistoryArchive( object ):
    def __init__( self, job=None, history=None, dataset=None, compressed=False,
history_attrs_filename=None, datasets_attrs_filename=None,
jobs_attrs_filename=None ):
self.job = job
self.history = history
self.dataset = dataset
self.compressed = compressed
self.history_attrs_filename = history_attrs_filename
self.datasets_attrs_filename = datasets_attrs_filename
self.jobs_attrs_filename = jobs_attrs_filename
@property
def up_to_date( self ):
""" Return False, if a new export should be generated for corresponding
history.
"""
job = self.job
return job.state not in [ Job.states.ERROR, Job.states.DELETED ] \
and job.update_time > self.history.update_time
@property
def ready( self ):
return self.job.state == Job.states.OK
@property
def preparing( self ):
return self.job.state in [ Job.states.RUNNING, Job.states.QUEUED, Job.states.WAITING ]
@property
def export_name( self ):
# Stream archive.
hname = ready_name_for_url( self.history.name )
hname = "Galaxy-History-%s.tar" % ( hname )
if self.compressed:
hname += ".gz"
return hname
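    # Illustrative (a sketch; the exact mangling depends on
    # ready_name_for_url):
    #
    #     jeha.compressed = True
    #     jeha.export_name   # -> e.g. 'Galaxy-History-My-History.tar.gz'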
class JobImportHistoryArchive( object ):
def __init__( self, job=None, history=None, archive_dir=None ):
self.job = job
self.history = history
        self.archive_dir = archive_dir
class GenomeIndexToolData( object ):
    def __init__( self, job=None, params=None, dataset=None, deferred_job=None,
                  transfer_job=None, fasta_path=None, created_time=None, modified_time=None,
dbkey=None, user=None, indexer=None ):
self.job = job
self.dataset = dataset
self.fasta_path = fasta_path
self.user = user
self.indexer = indexer
self.created_time = created_time
self.modified_time = modified_time
self.deferred = deferred_job
self.transfer = transfer_job
class DeferredJob( object ):
states = Bunch( NEW = 'new',
WAITING = 'waiting',
QUEUED = 'queued',
RUNNING = 'running',
OK = 'ok',
ERROR = 'error' )
def __init__( self, state=None, plugin=None, params=None ):
self.state = state
self.plugin = plugin
self.params = params
def get_check_interval( self ):
if not hasattr( self, '_check_interval' ):
self._check_interval = None
return self._check_interval
def set_check_interval( self, seconds ):
self._check_interval = seconds
check_interval = property( get_check_interval, set_check_interval )
def get_last_check( self ):
if not hasattr( self, '_last_check' ):
self._last_check = 0
return self._last_check
def set_last_check( self, seconds ):
try:
self._last_check = int( seconds )
        except ( ValueError, TypeError ):
self._last_check = time.time()
last_check = property( get_last_check, set_last_check )
@property
def is_check_time( self ):
if self.check_interval is None:
return True
elif ( int( time.time() ) - self.last_check ) > self.check_interval:
return True
else:
return False
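    # Illustrative (a sketch, not executed): a monitor loop might gate its
    # polling on these properties; run_plugin_check is hypothetical:
    #
    #     if deferred_job.is_check_time:
    #         run_plugin_check( deferred_job )
    #         deferred_job.last_check = time.time()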
class Group( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'name' )
def __init__( self, name = None ):
self.name = name
self.deleted = False
class UserGroupAssociation( object ):
def __init__( self, user, group ):
self.user = user
self.group = group
class History( object, Dictifiable, UsesAnnotations, HasName ):
dict_collection_visible_keys = ( 'id', 'name', 'published', 'deleted' )
dict_element_visible_keys = ( 'id', 'name', 'genome_build', 'deleted', 'purged', 'update_time',
'published', 'importable', 'slug', 'empty' )
default_name = 'Unnamed history'
def __init__( self, id=None, name=None, user=None ):
self.id = id
self.name = name or History.default_name
self.deleted = False
self.purged = False
self.importing = False
self.genome_build = None
self.published = False
# Relationships
self.user = user
self.datasets = []
self.galaxy_sessions = []
self.tags = []
@property
def empty( self ):
return self.hid_counter == 1
def _next_hid( self ):
        # this is overridden by the db_next_hid() method in mapping.py
if len( self.datasets ) == 0:
return 1
else:
last_hid = 0
for dataset in self.datasets:
if dataset.hid > last_hid:
last_hid = dataset.hid
return last_hid + 1
def add_galaxy_session( self, galaxy_session, association=None ):
if association is None:
self.galaxy_sessions.append( GalaxySessionToHistoryAssociation( galaxy_session, self ) )
else:
self.galaxy_sessions.append( association )
def add_dataset( self, dataset, parent_id=None, genome_build=None, set_hid=True, quota=True ):
if isinstance( dataset, Dataset ):
dataset = HistoryDatasetAssociation(dataset=dataset)
object_session( self ).add( dataset )
object_session( self ).flush()
elif not isinstance( dataset, HistoryDatasetAssociation ):
            raise TypeError( "You can only add Dataset and HistoryDatasetAssociation instances to a history" +
                             " ( you tried to add %s )." % str( dataset ) )
if parent_id:
for data in self.datasets:
if data.id == parent_id:
dataset.hid = data.hid
break
else:
if set_hid:
dataset.hid = self._next_hid()
else:
if set_hid:
dataset.hid = self._next_hid()
if quota and self.user:
self.user.total_disk_usage += dataset.quota_amount( self.user )
dataset.history = self
if genome_build not in [None, '?']:
self.genome_build = genome_build
self.datasets.append( dataset )
return dataset
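    # Illustrative usage (a sketch, assuming a mapped History attached to a
    # SQLAlchemy session):
    #
    #     hda = history.add_dataset( Dataset(), genome_build='hg19' )
    #     hda.hid       # next hid in this history
    #     hda.history   # -> history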
def add_dataset_collection( self, history_dataset_collection, set_hid=True ):
if set_hid:
history_dataset_collection.hid = self._next_hid()
history_dataset_collection.history = self
# TODO: quota?
self.dataset_collections.append( history_dataset_collection )
return history_dataset_collection
def copy( self, name=None, target_user=None, activatable=False, all_datasets=False ):
"""
Return a copy of this history using the given `name` and `target_user`.