import logging
import os
import pipes

from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.orchestra import run

log = logging.getLogger(__name__)


def task(ctx, config):
"""
Run ceph all workunits found under the specified path.
For example::
tasks:
- ceph:
- ceph-fuse: [client.0]
- workunit:
clients:
client.0: [direct_io, xattrs.sh]
client.1: [snaps]
branch: foo
You can also run a list of workunits on all clients:
tasks:
- ceph:
- ceph-fuse:
- workunit:
tag: v0.47
clients:
all: [direct_io, xattrs.sh, snaps]
If you have an "all" section it will run all the workunits
on each client simultaneously, AFTER running any workunits specified
for individual clients. (This prevents unintended simultaneous runs.)
To customize tests, you can specify environment variables as a dict::
tasks:
- ceph:
- ceph-fuse:
- workunit:
sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6
clients:
all: [snaps]
env:
FOO: bar
BAZ: quux
"""
    assert isinstance(config, dict)
    assert isinstance(config.get('clients'), dict), \
        'configuration must contain a dictionary of clients'

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('workunit', {}))

    refspec = config.get('branch')
    if refspec is None:
        refspec = config.get('sha1')
    if refspec is None:
        refspec = config.get('tag')
    if refspec is None:
        refspec = 'HEAD'

    log.info('Pulling workunits from ref %s', refspec)

    created_dir_dict = {}

    if config.get('env') is not None:
        assert isinstance(config['env'], dict), 'env must be a dictionary'
    clients = config['clients']

    log.info('Making a separate scratch dir for every client...')
    for role in clients.iterkeys():
        assert isinstance(role, basestring)
        if role == "all":
            continue
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
        created_dir_dict[role] = created_mnt_dir

    all_spec = False  # is there an "all" grouping?
    with parallel() as p:
        for role, tests in clients.iteritems():
            if role != "all":
                p.spawn(_run_tests, ctx, refspec, role, tests,
                        config.get('env'))
            else:
                all_spec = True

    if all_spec:
        all_tasks = clients["all"]
        _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'),
                              config.get('subdir'))

    for role in clients.iterkeys():
        assert isinstance(role, basestring)
        if role == "all":
            continue
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        if created_dir_dict[role]:
            _delete_dir(ctx, role, config.get('subdir'))


def _delete_dir(ctx, role, subdir):
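    """
    Remove the scratch directory created for a client role: delete the
    client.<id> subtree under the mount point, then try to rmdir the
    mount point itself, which only succeeds once it is empty. Failures
    are logged and ignored. The subdir argument is accepted for symmetry
    with _make_scratch_dir but is not used here.
    """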
    PREFIX = 'client.'
    testdir = teuthology.get_testdir(ctx)
    id_ = role[len(PREFIX):]
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
    mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
    client = os.path.join(mnt, 'client.{id}'.format(id=id_))
    try:
        remote.run(
            args=[
                'rm',
                '-rf',
                '--',
                client,
            ],
        )
        log.info("Deleted dir {dir}".format(dir=client))
    except Exception:
        log.debug("Caught an exception deleting dir {dir}".format(dir=client))

    try:
        remote.run(
            args=[
                'rmdir',
                '--',
                mnt,
            ],
        )
        log.info("Deleted dir {dir}".format(dir=mnt))
    except Exception:
        log.debug("Caught an exception deleting dir {dir}".format(dir=mnt))


def _make_scratch_dir(ctx, role, subdir):
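    """
    Make a scratch directory for the given client role under its mount
    point, creating the mount point first if it does not already exist
    (it may not when neither kclient nor ceph-fuse is in use). Returns
    True if the mount point had to be created, so the caller knows to
    clean it up afterwards, and False otherwise.
    """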
    created = False
    PREFIX = 'client.'
    id_ = role[len(PREFIX):]
    log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
    dir_owner = remote.user
    mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
    # if neither kclient nor ceph-fuse are required for a workunit,
    # mnt may not exist. Stat and create the directory if it doesn't.
    try:
        remote.run(
            args=[
                'stat',
                '--',
                mnt,
            ],
        )
        log.info('Did not need to create dir {dir}'.format(dir=mnt))
    except Exception:
        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
            ],
        )
        log.info('Created dir {dir}'.format(dir=mnt))
        created = True

    if not subdir:
        subdir = 'client.{id}'.format(id=id_)

    if created:
        remote.run(
            args=[
                'cd',
                '--',
                mnt,
                run.Raw('&&'),
                'mkdir',
                '--',
                subdir,
            ],
        )
    else:
        remote.run(
            args=[
                # cd first so this will fail if the mount point does
                # not exist; pure install -d will silently do the
                # wrong thing
                'cd',
                '--',
                mnt,
                run.Raw('&&'),
                'sudo',
                'install',
                '-d',
                '-m', '0755',
                '--owner={user}'.format(user=dir_owner),
                '--',
                subdir,
            ],
        )
    return created


def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
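    """
    Make a scratch dir for every client, run each workunit in turn on
    all of the clients in parallel, and finally delete the generated
    per-client directories.
    """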
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()
    for client in client_generator:
        (client_remote,) = ctx.cluster.only(
            'client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)

    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)

    # cleanup the generated client directories
    client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), subdir)


def _run_tests(ctx, refspec, role, tests, env, subdir=None):
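    """
    Fetch qa/workunits for the given refspec onto the client, build a
    list of the executable workunit scripts found there, and run every
    script matching each entry in tests inside the client's scratch
    directory. The fetched sources and the generated list file are
    removed when the run finishes.
    """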
    testdir = teuthology.get_testdir(ctx)
    assert isinstance(role, basestring)
    PREFIX = 'client.'
    assert role.startswith(PREFIX)
    id_ = role[len(PREFIX):]
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
    mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
    # subdir so we can remove and recreate this a lot without sudo
    if subdir is None:
        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
    else:
        scratch_tmp = os.path.join(mnt, subdir)
    srcdir = '{tdir}/workunit.{role}'.format(tdir=testdir, role=role)

    remote.run(
        logger=log.getChild(role),
        args=[
            'mkdir', '--', srcdir,
            run.Raw('&&'),
            'git',
            'archive',
            '--remote=git://ceph.newdream.net/git/ceph.git',
            '%s:qa/workunits' % refspec,
            run.Raw('|'),
            'tar',
            '-C', srcdir,
            '-x',
            '-f-',
            run.Raw('&&'),
            'cd', '--', srcdir,
            run.Raw('&&'),
            'if', 'test', '-e', 'Makefile', run.Raw(';'),
            'then', 'make', run.Raw(';'), 'fi',
            run.Raw('&&'),
            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
            run.Raw('>{tdir}/workunits.list'.format(tdir=testdir)),
        ],
    )

    workunits = sorted(teuthology.get_file(
        remote,
        '{tdir}/workunits.list'.format(tdir=testdir)).split('\0'))
    assert workunits

    try:
        assert isinstance(tests, list)
        for spec in tests:
            log.info('Running workunits matching %s on %s...', spec, role)
            prefix = '{spec}/'.format(spec=spec)
            to_run = [w for w in workunits if w == spec or w.startswith(prefix)]
            if not to_run:
                raise RuntimeError(
                    'Spec did not match any workunits: {spec!r}'.format(spec=spec))
            for workunit in to_run:
                log.info('Running workunit %s...', workunit)
                args = [
                    'mkdir', '-p', '--', scratch_tmp,
                    run.Raw('&&'),
                    'cd', '--', scratch_tmp,
                    run.Raw('&&'),
                    run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'),
                    run.Raw('CEPH_REF={ref}'.format(ref=refspec)),
                    run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
                    run.Raw('CEPH_ID="{id}"'.format(id=id_)),
                    run.Raw('PYTHONPATH="$PYTHONPATH:{tdir}/binary/usr/local/lib/python2.7/dist-packages:{tdir}/binary/usr/local/lib/python2.6/dist-packages"'.format(tdir=testdir)),
                ]
                if env is not None:
                    for var, val in env.iteritems():
                        quoted_val = pipes.quote(val)
                        env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                        args.append(run.Raw(env_arg))
                args.extend([
                    'adjust-ulimits',
                    'ceph-coverage',
                    '{tdir}/archive/coverage'.format(tdir=testdir),
                    '{srcdir}/{workunit}'.format(
                        srcdir=srcdir,
                        workunit=workunit,
                    ),
                ])
                remote.run(
                    logger=log.getChild(role),
                    args=args,
                )
                remote.run(
                    logger=log.getChild(role),
                    args=['sudo', 'rm', '-rf', '--', scratch_tmp],
                )
    finally:
        # 'spec' may be undefined here (e.g. an empty tests list), so
        # log only the role
        log.info('Cleaning up workunits on %s...', role)
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm', '-rf', '--',
                '{tdir}/workunits.list'.format(tdir=testdir),
                srcdir,
            ],
        )