forked from fBeyer89/life_followup_preproc
-
Notifications
You must be signed in to change notification settings - Fork 0
/
run_workflow_hcplike.py
178 lines (168 loc) · 6.26 KB
/
run_workflow_hcplike.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
#!/usr/bin/env python
# encoding: utf-8
"""
hcpipe.py
Copyright (c) 2013 Ben Acland
"""
import os
import sys
import getopt
from workflow import *
# CLI help text printed by -h/--help and on usage errors.
# (Fixed typos: "confiuration" -> "configuration", "run in from" -> "run it from".)
help_message = """
Provides command line tools to build and work on workflow configuration files,
and for launching the pipeline using a specific config. You can either import
HCPrepWorkflow, then configure and run it from your own code, or use the
built-in command line tools to launch the workflow on your data.
Commands
--------
-h, --help
Prints out this message.
-i, --init
Creates a new config file in your current directory.
-u, --update
Reruns part of the config file setup script on any config file in your current directory.
-g, --graph
Draws a graph of the nipype workflow to the current directory.
-r, --run
Runs the workflow using any config file in the current directory.
Parameters
----------
-c, --config (path)
The config file to use.
-s (comma separated subject numbers)
The list of subjects who you'd like to put through the workflow. Overrides
setting in the config file.
-n (integer)
The number of threads you would like to use. Higher numbers are faster to
a point, but depending on how large your data is (chances are, it is quite
large), you may well want to limit yourself to something below 8 for
starters if you're working on a large server. Default is 1. Ignored if
you use -p.
-p, --pbs
Causes nipype to try to use the PBS plugin. For use on the cluster only.
Experimental.
-o (path)
The directory to put preprocessed data in. Default is current directory.
-v
Verbose. At this point, just makes -g print a graph that expands iterables.
"""
class Usage(Exception):
    """Signals a command-line usage problem.

    Carries the message to print to stderr; defaults to the full
    module-level help text when raised without an argument.
    """

    def __init__(self, msg=help_message):
        self.msg = msg
def main(argv=None):
import os
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "pvrhiugs:n:o:c:", ["run", "help", "init", "update", "graph", "config", "pbs"])
except getopt.error, msg:
raise Usage(msg="\n"+str(msg))
# option processing
update = False
graph = False
subs = None
N_PROCS = None
run = False
verbose = False
c_file = None
use_pbs = False
out_dir = os.getcwd()
working_dir="/data/pt_life/LIFE_fu/wd_preprocessing/"
for option, value in opts:
if option in ("-h", "--help"):
raise Usage()
if option in ("-i", "--init"):
setup_conf()
return
if option in ("-u", "--update"):
update = True
if option in ("-g", "--graph"):
graph = True
if option in ("-r", "--run"):
run = True
if option in ("-c", "--config"):
c_file = value
if option in ("-p", "--pbs"):
use_pbs = True
if option in ("-s"):
subs = [sub.strip() for sub in value.split(",")]
if option in ("-v"):
verbose = True
if option in ('-n'):
N_PROCS = int_or_none(value)
if option in ('-o'):
out_dir = value
print N_PROCS
# select config file
if not c_file:
try:
c_file = select_conf()
except Exception, e:
raise Usage(msg="Could not find a config file.")
# update if necessary
if update:
update_conf(c_file)
return
# make sure we're going to do something
if not run and not graph:
raise Usage(msg="Nothing to do...")
# validate the config for running
conf = get_config_dict(c_file)
if not conf:
raise Usage(msg="Could not parse config file.")
if not validate_config(conf):
raise Usage(msg="Invalid config file.")
# build the workflow, pass it subjects if they were given in the command line
wk = HCPrepWorkflow(name="hcp_prep_workflow", config=conf, base_dir=working_dir)
if subs:
wk.subjects_node.iterables = ("subject", subs)
# set the output dir
out_dir = os.path.abspath(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
#wk.data_sink.inputs.base_directory = out_dir
# graph if you like
if graph:
g2u = "exec" if verbose else "orig"
wk.write_graph(dotfilename="hcp_pipe_graph", graph2use=g2u)
return
if not run:
return
if use_pbs:
print "running using PBS"
# note - the default qsub args are very modest. use this structure
# to scale up if necessary.
ques = [
[[wk.dicom_convert, wk.dicom_info, wk.nii_wrangler],
{"qsub_args":"-l nodes=1:ppn=1,mem=1gb,walltime=1:00:00"}],
[[wk.hc_pre_fs],
{"qsub_args":"-l nodes=1:ppn=2,mem=10gb,vmem=10gb,walltime=6:00:00"}],
[[wk.hc_fs],
{"qsub_args":"-l nodes=1:ppn=2,mem=5gb,walltime=24:00:00"}],
[[wk.hc_post_fs],
{"qsub_args":"-l nodes=1:ppn=4,mem=10gb,walltime=4:00:00"}],
[[wk.hc_volume, wk.hc_surface],
{"qsub_args":"-l nodes=1:ppn=4,mem=10gb,walltime=12:00:00"}],
]
for q in ques:
p_args = dict(q[1],**{"overwrite":True})
for n in q[0]:
n.plugin_args = p_args
wk.run(plugin="PBS",
plugin_args={"qsub_args":"-l nodes=1:ppn=1,mem=1gb,walltime=1:00:00"})
elif N_PROCS > 0:
print "running with %d processes" % N_PROCS
wk.run(plugin="MultiProc", plugin_args={"n_procs" : N_PROCS, "non_daemon" : True})
else:
print "running single process"
wk.run()
except Usage, err:
f_str = sys.argv[0].split("/")[-1] + ":"
lfs = len(f_str)
f_str = "%s\n%s\n%s\n" % ("-"*lfs, f_str, "-"*lfs)
print >> sys.stderr, f_str + str(err.msg)
print >> sys.stderr, "-------------------\nfor help use --help\n-------------------"
return 2
if __name__ == "__main__":
    # Propagate main()'s return value (2 on usage error, None/0 otherwise)
    # to the shell as the process exit status.
    sys.exit(main())