#!/usr/bin/env python3
# vim: ft=python
"""
Run a series of benchmarks against one or more Bitcoin Core revisions.
"""
import atexit
import os
import datetime
import getpass
import traceback
import sys
import pickle
import random
import time
import typing as t
from pathlib import Path
from textwrap import dedent
import clii
from . import (
output,
config,
bitcoind,
results,
slack,
benchmarks,
logging,
git,
sh,
hwinfo,
util,
)
from .globals import G
from .logging import get_logger
logger = get_logger()
assert sys.version_info >= (3, 8), "Python >=3.8 required"
# Maintain a lockfile that is global across the host to ensure that we're not
# running more than one instance on a given system.
LOCKFILE_PATH = Path("/tmp/bitcoin_bench.lock")
def _startup_assertions(cfg):
"""
Ensure the benchmark environment is suitable in various ways.
"""
if sh.run("$(which time) -f %M sleep 0.01", quiet=True).returncode != 0:
raise RuntimeError("the time package is required")
def warn(msg):
if cfg.safety_checks:
raise RuntimeError(msg)
else:
logger.warning(msg)
if (
sh.run("pgrep --list-name bitcoin | grep -v bitcoinperf", quiet=True).returncode
== 0
):
warn(
"benchmarks shouldn't run concurrently with unrelated bitcoin processes"
)
if cfg.safety_checks:
sh.run("sudo -n swapoff -a")
if sh.run('cat /proc/swaps | grep -v "^Filename"', check=False).returncode != 1:
warn("swap should be disabled during benchmarking")
avg, _, _ = os.getloadavg()
load_tries = 10
while avg > 1.0:
if load_tries > 0:
wait = 30
logger.info(f"1min load average high: {avg}; waiting {wait}s to cool down")
load_tries -= 1
time.sleep(wait)
else:
warn(f"1min load average high: {avg}")
break
avg, _, _ = os.getloadavg()
if not _try_acquire_lockfile():
raise RuntimeError("Couldn't acquire lockfile %s; exiting", LOCKFILE_PATH)
def _cleanup_tmpfiles():
"""Remove temporary bitcoinperf directories older than 2 days."""
# TODO parameterize this
sh.run(
r"ls -t1 %s | tail -n +5 | xargs -I {} rm -rf %s/{} \;"
% (config.workdir_path, config.workdir_path)
)
sh.run(r"find /tmp/test_runner_* -mtime +3 -exec rm -rf {} \;")
def run_full_suite(cfg) -> bool:
"""
Create a tmp directory in which we will clone bitcoin, build it, and run
various benchmarks.
Return whether we successfully completed the run.
"""
logger.info(
"Running benchmarks %s with compilers %s",
[i[0] for i in cfg.benches if i[1]],
cfg.compilers,
)
# TODO: move this somewhere more appropriate.
_cleanup_tmpfiles()
_startup_assertions(cfg)
repodir = cfg.workdir / "bitcoin"
git.get_repo(repodir)
checkouts, bad_targets = git.resolve_targets(repodir, cfg.to_bench)
if bad_targets:
logger.warning("Couldn't resolve git targets: %s", bad_targets)
return False
config.link_latest_run(cfg)
for target in cfg.to_bench:
assert target.gitco
G.gitco = target.gitco
for compiler in cfg.compilers:
maybe_run_bench_some_times(
target,
cfg,
compiler,
cfg.benches.build,
benchmarks.Build,
always_run=True,
)
maybe_run_bench_some_times(
target, cfg, compiler, cfg.benches.unittests, benchmarks.MakeCheck
)
maybe_run_bench_some_times(
target, cfg, compiler, cfg.benches.functests, benchmarks.FunctionalTests
)
maybe_run_bench_some_times(
target, cfg, compiler, cfg.benches.microbench, benchmarks.Microbench
)
        # Only run the following for gcc, since these benchmarks are expensive.
        compiler = config.Compilers.gcc
build_step = benchmarks.Build(cfg, cfg.benches.build, compiler, target, 0)
build_step.run(cfg, cfg.benches.build)
maybe_run_bench_some_times(
target, cfg, compiler, cfg.benches.ibd_from_network, benchmarks.IbdReal
)
maybe_run_bench_some_times(
target, cfg, compiler, cfg.benches.ibd_from_local, benchmarks.IbdLocal
)
maybe_run_bench_some_times(
target,
cfg,
compiler,
cfg.benches.ibd_range_from_local,
benchmarks.IbdRangeLocal,
)
maybe_run_bench_some_times(
target, cfg, compiler, cfg.benches.reindex, benchmarks.Reindex
)
maybe_run_bench_some_times(
target,
cfg,
compiler,
cfg.benches.reindex_chainstate,
benchmarks.ReindexChainstate,
)
return True
def maybe_run_bench_some_times(
target, cfg, compiler, bench_cfg, bench_class, *, always_run=False
):
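    """
    Run bench_class bench_cfg.run_count times. Skip entirely when bench_cfg
    is falsey, unless always_run forces a default build config.
    """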
if not bench_cfg and not always_run:
logger.info("[%s] skipping benchmark", bench_class.name)
return
elif not bench_cfg:
bench_cfg = config.BenchBuild()
for i in range(getattr(bench_cfg, "run_count", 1)):
b = bench_class(cfg, bench_cfg, compiler, target, i)
results.ALL_RUNS.append(b)
b.run(cfg, bench_cfg)
def _try_acquire_lockfile():
if LOCKFILE_PATH.exists():
return False
with LOCKFILE_PATH.open("w") as f:
f.write("%s,%s" % (datetime.datetime.utcnow(), getpass.getuser()))
G.lockfile_held = True
return True
def _get_shutdown_handler(cfg: config.Config):
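    """
    Build the atexit handler: stop any still-running bitcoind nodes, release
    the global lockfile, and optionally tear down the working directory.
    """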
def handler():
for node in bitcoind.Node.all_instances:
if node.ps and node.ps.returncode is None:
node.terminate()
node.join()
# Release lockfile if we've got it
if G.lockfile_held:
LOCKFILE_PATH.unlink()
logger.debug("shutdown: removed lockfile at %s", LOCKFILE_PATH)
# Clean up to avoid filling disk
# TODO add more granular cleanup options
if cfg.teardown and (not cli.args.no_teardown) and cfg.workdir.is_dir():
sh.cd(cfg.workdir)
_stash_debug_file(cfg)
# For now only remove the bitcoin subdir, since that'll be far and
# away the biggest subdir.
sh.run("rm -rf %s" % (cfg.workdir / "bitcoin"))
logger.info("shutdown: removed bitcoin dir at %s", cfg.workdir)
elif not cfg.teardown:
logger.info("shutdown: leaving bitcoin dir at %s", cfg.workdir)
return handler
def _stash_debug_file(cfg: config.Config):
"""
Throw the last debug file so that we avoid removing it with the
rest of the bitcoin stuff.
"""
assert cfg.workdir
# Move the debug.log file out into /tmp for diagnostics.
debug_file = cfg.workdir / "bitcoin" / "data" / "debug.log"
if debug_file.is_file():
        # Overwrite any previously stashed log so we don't accumulate files.
debug_file.rename(cfg.workdir / "stashed-debug.log")
cli = clii.App(description=__doc__)
cli.add_arg("--verbose", "-v", action="store_true")
cli.add_arg("--no-teardown", action="store_true")
def _missing_pkgs() -> t.List[str]:
errs = []
if "GNU time" not in sh.run("/usr/bin/time --version").stderr:
errs.append("Need to install GNU time (sudo apt install time)")
if not sh.run("which fio", quiet=True).ok:
errs.append("Need to install fio (sudo apt install fio)")
return errs
@cli.cmd
def setup():
"""
Run a guided setup of the fixture data needed to benchmark.
"""
from .thirdparty import color as c # type: ignore
def warn(*args, **kwargs):
print(c.yellow(dedent(*args, **kwargs)))
def scare(*args, **kwargs):
print(c.red(c.bold(dedent(*args, **kwargs))))
catchphrase = random.choice(
[
"let's be honest, it's basically your only option",
"barely adequate but almost certainly better than guessing",
"just slightly easier to configure than autotools",
"WITH_LOCK(::cs_main, chainstate->IsLoveReal())",
"get out, before the rats eat you!",
]
)
print(
fr"""
_ _ _ _ __
| |__(_) |_ __ ___(_)_ _ _ __ ___ _ _ / _|
| '_ \ | _/ _/ _ \ | ' \| '_ \/ -_) '_| _|
|_.__/_|\__\__\___/_|_||_| .__/\___|_| |_|
|_|
{c.yellow(catchphrase)}
"""
)
def ent():
input("\npress [enter] to continue ")
print(
dedent(
"""
Bitcoinperf requires the existence of some data and git repos;
we're going to set those up now.
"""
),
end="",
)
ent()
def div():
print("\n" + "-" * 80 + "\n")
div()
    _15m_load = os.getloadavg()[-1]
    if _15m_load > 1.0:
warn(
f"""
Warning: I've noticed your load is highish (15m avg: {_15m_load}).
Please note that benchmark results are very suspect when run on
a computer used for regular activity. If you're doing other things
with the computer, the load may vary while bitcoinperf runs,
skewing results.
"""
)
ent()
if not config.config_path.exists():
config.config_path.mkdir()
print("Created config dir at {config.config_path}")
print(
dedent(
f"""
Bitcoinperf benchmarks often rely on one bitcoind process, a fixture
peer, serving data to the bitcoind process being benchmarked. Since the
fixture peer needs data to serve, we have to prepopulate a repository
and datadir used to create the synced peer.
The bitcoin repo and datadir for this peer will be in
{config.peer_path}/bitcoin
{config.peer_path}/datadir
"""
)
)
ent()
if not config.peer_path.exists():
config.peer_path.mkdir()
def yn(prompt: str) -> bool:
        return input(prompt).lower() in ["y", "yes", ""]
if not config.peer_repo.exists():
print(
dedent(
f"""
The peer requires a bitcoin repo to exist at
{config.peer_repo}
"""
)
)
if yn("Clone bitcoin.git from GitHub? [Y/n] "):
url = "https://github.com/bitcoin/bitcoin.git"
print(f"Cloning from {url}... ", end="")
sys.stdout.flush()
sh.run(f"git clone --depth 1 {url} {config.peer_repo}")
print(c.green("finished!"))
sys.stdout.flush()
time.sleep(0.8)
else:
scare(
f"""
!! You'll need to provide a repo for the synced peer to
use at {config.peer_repo}, or use the networked peer option
(see config:SyncedPeer.address).
You can symlink a repo, if that floats your boat.
"""
)
if not config.peer_datadir.exists():
scare(
f"""
!! You'll also need to provide the synced peer with a
populated datadir at
{config.peer_datadir}
Either symlink or copy a datadir here that is synced to a height
above the range you want to benchmark (probably above at least
550,000).
"""
)
print(
c.cyan(
c.bold(
dedent(
"""
!! Alternatively you can specify a network address to use in lieu
of a local peer with the `--peer-address` flag. Bitcoinperf
will (obviously) not manage the setup/teardown of this peer.
"""
)
)
)
)
ent()
if not config.base_datadirs.exists():
config.base_datadirs.mkdir()
print(
dedent(
f"""
To do meaningful benchmarking, we often have to look at a region
of the chain that is well past the first few hundred thousand
blocks, since these blocks are not characteristic of where the
IBD process bottlenecks.
To this end, you can download datadirs that are pre-synced up to
a certain height. We will seed the benchmark node
            from these datadirs so that you can immediately start the
benchmark from a part of the chain that is meaningful to
examine for overall performance.
There are a few regions of the chain available, though the default
is {config.DEFAULT_REGION}
You will be prompted if you'd like to download each, though only
the first is used by default.
"""
)
)
for region in config.CHAIN_REGIONS.values():
if not region.path.exists():
prompt = (
f"Download pre-synced, pruned {region.height} block datadir? [Y/n] "
)
if yn(prompt):
url = f"https://storage.googleapis.com/chaincode-bitcoinperf/{region.filename}" # noqa
print(f"Downloading and decompressing {url}...")
print("└─ this will take about 15 minutes (~5.5GB down)")
sh.run(
f"cd {config.base_datadirs} && "
f"mkdir -p {region.path} && cd {region.path} && ( curl {url} | tar -xzv )",
check=True
)
print(c.green(f"Download of {region} complete"))
elif region == config.DEFAULT_REGION:
scare(
f"""
Be warned: `bitcoinperf bench-pr` will not work out of the box
without the default prebuilt datadir {region}
"""
)
else:
print("Region skipped")
print(
c.blue(
dedent(
"""
Ensure you've installed all dependencies to compile bitcoin core
locally. See
- `./bin/install.sh` or
- https://github.com/bitcoin/bitcoin/blob/master/doc/build-unix.md
"""
)
)
)
pkgs = _missing_pkgs()
if pkgs:
print(c.red("Missing packages: "))
for pkg_msg in pkgs:
print(c.red(f" - {pkg_msg}"))
ent()
username = getpass.getuser()
print(
c.blue(
dedent(
f"""
Be sure you've added the following lines to your /etc/sudoers file
so that we can drop caches:
{username} ALL = NOPASSWD: /sbin/sysctl vm.drop_caches=3
{username} ALL = NOPASSWD: /sbin/swapoff -a
"""
)
)
)
print(
c.green(
dedent(
"""
cool, have fun.
`bitcoinperf bench-pr $PR_NUMBER` is probably what you want.
"""
)
)
)
def die(*args, **kwargs):
kwargs.setdefault("file", sys.stderr)
print(*args, **kwargs)
sys.exit(1)
@cli.cmd
def bench_pr(
pr_num: str,
run_id: str = None,
peer_tag: str = "v26.0",
peer_address: str = None,
num_blocks: int = 1_000,
run_count: int = 2,
run_micros: bool = False,
compare_ref: str = "",
bitcoind_args: str = "",
chain_region: str = "2021-02",
):
"""
Benchmark a PR relative to its merge base for some number of blocks,
    starting from a configurable region of the chain (`chain_region`). By default,
IBD starts from height 667,200 (2021-02).
Args:
run_id: label for the run - will create /tmp/bitcoinperf-[run_id]
peer_tag: which git tag the server bitcoind process will run
peer_address: network address to use as peer instead of local instance
num_blocks: the number of blocks to benchmark
run_count: number of times to test IBD of each git ref
run_micros: if true, run the microbenchmarks
compare_ref: compare the PR against this git ref instead of inferred mergebase
bitcoind_args: additional arguments to pass to bitcoind invocations
chain_region: which region of the chain to benchmark from. Choices are: 2021-02, 2017-12
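    Example:
        bitcoinperf bench-pr $PR_NUMBER --run-count 3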
"""
run_id = run_id or pr_num
workdir = Path(f"/tmp/bitcoinperf-{run_id}")
if chain_region not in config.CHAIN_REGIONS:
die(f"chain_region must be in {config.CHAIN_REGIONS}")
elif not (pruned_datadir := config.CHAIN_REGIONS[chain_region]).path.exists():
die(f"pruned datadir {pruned_datadir.path} not found - try `bitcoinperf setup`")
if workdir.exists():
logger.warning(f"Removing existing (old?) workdir {workdir}")
sh.rm(workdir)
workdir.mkdir()
logging.configure_logger(workdir, "DEBUG" if cli.args.verbose else "INFO")
repodir = workdir / "bitcoin"
    # If pr_num is [remote]/[ref], treat it as a plain git ref, not a PR number.
if "/" in pr_num:
name = pr_num
        gitremote, gitref = pr_num.split("/", 1)
else:
name = f"#{pr_num}"
gitremote = "origin"
gitref = f"pr/{pr_num}"
targets = [
config.Target(
name=name,
gitref=gitref,
gitremote=gitremote,
rebase=False,
bitcoind_extra_args=bitcoind_args,
),
]
if compare_ref:
if "/" not in compare_ref:
compare_ref = f"origin/{compare_ref}"
        remote, ref = compare_ref.split("/", 1)
name = ref[:8] if util.is_hex(ref) else ref[:24]
targets.append(
config.Target(
name=name,
gitref=ref,
gitremote=remote,
rebase=False,
bitcoind_extra_args=bitcoind_args,
)
)
else:
targets.append(
config.Target(
name=git.MERGEBASE_REF,
gitref="master",
gitremote="origin",
rebase=False,
bitcoind_extra_args=bitcoind_args,
)
)
git.get_repo(repodir)
checkouts, bad_targets = git.resolve_targets(repodir, targets)
if bad_targets:
print(f"failed to find commit for {[t.gitref for t in bad_targets]}")
sys.exit(1)
    elif len({co.sha for co in checkouts}) == 1:
# Don't do the run if we're comparing a commit to itself.
print("benchmarking a commit against itself - quitting")
sys.exit(1)
    # The start height is fixed by the prebuilt datadir we seed from.
start_height = pruned_datadir.height
end_height = start_height + num_blocks
peer_args: t.Dict[str, t.Union[str, Path]] = {}
if peer_address:
peer_args["address"] = peer_address
else:
peer_args.update(
dict(
datadir=config.peer_datadir,
repodir=config.peer_repo,
gitref=peer_tag,
)
)
logger.info("Running benchmarks for:")
for target in targets:
logger.info(" %s", target.gitco)
peer = config.SyncedPeer(**peer_args)
build_config = config.BenchBuild()
ibd_config = config.BenchIbdRangeFromLocal(
src_datadir=pruned_datadir.path,
start_height=start_height,
end_height=end_height,
)
cfg = config.Config(
to_bench=targets,
workdir=workdir,
synced_peer=peer,
compilers=[config.Compilers.gcc],
safety_checks=False,
)
_startup_assertions(cfg)
atexit.register(_get_shutdown_handler(cfg))
    # Named to avoid shadowing the imported `results` module.
    bench_results: t.List[benchmarks.Benchmark] = []
if run_micros:
for i, ts in enumerate([targets]):
for target in ts:
for compiler in config.Compilers:
git.checkout_in_dir(workdir / "bitcoin", target)
build = benchmarks.Build(cfg, build_config, compiler, target, i)
build.run(cfg, build_config)
assert build.gitco
                    bench_results.append(build)
micro_conf = config.BenchMicrobench()
micro = benchmarks.Microbench(cfg, micro_conf, compiler, target, i)
micro.run(cfg, micro_conf)
assert micro.gitco
                    bench_results.append(micro)
if "clang" in os.environ.get("CXX", ""):
compiler = config.Compilers.clang
else:
# Default to gcc since these tests are long and we ship binaries
# built with gcc.
compiler = config.Compilers.gcc
for i, ts in enumerate([targets] * run_count):
for target in ts:
git.checkout_in_dir(workdir / "bitcoin", target)
build = benchmarks.Build(cfg, build_config, compiler, target, i)
build.run(cfg, build_config)
assert build.gitco
ibd = benchmarks.IbdRangeLocal(cfg, ibd_config, compiler, target, i)
ibd.run(cfg, ibd_config)
assert ibd.gitco
            bench_results.append(ibd)
    _persist_results(cfg, bench_results)
    _print_results(cfg, bench_results)
@cli.cmd
def run(yaml_filename: Path):
"""Do a benchmark run based on a yaml configuration file."""
config_file = Path(yaml_filename)
if not config_file.exists():
print(".yaml config file required as only argument", file=sys.stderr)
sys.exit(1)
cfg = config.load(config_file)
assert cfg.workdir
logging.configure_logger(cfg.workdir, "DEBUG" if cli.args.verbose else "INFO")
if cfg.codespeed:
results.Reporters.codespeed = results.CodespeedReporter(cfg.codespeed)
G.slack = slack.Client(cfg.slack.webhook_url if cfg.slack else "")
slack.attach_slack_handler_to_logger(cfg, G.slack, logger)
atexit.register(_get_shutdown_handler(cfg))
logger.info(
"Started on host %s (codespeed env %s)",
config.HOSTNAME,
cfg.codespeed.envname if cfg.codespeed else "[none]",
)
logger.info(str(cfg))
try:
completed = run_full_suite(cfg)
except Exception:
G.slack.send_to_slack_attachment(
G.gitco, "Error", {}, text=traceback.format_exc(), success=False
)
raise
if completed:
_persist_results(cfg, results.ALL_RUNS)
_print_results()
else:
print("run failed")
sys.exit(1)
def _persist_results(cfg, results):
logger.info("Getting hardware information")
hw = hwinfo.get_hwinfo(cfg.workdir, None)
res_dict = {
"runs": results,
"hwinfo": hw,
}
try:
results_path = cfg.results_dir / "results.pickle"
results_path.write_bytes(pickle.dumps(res_dict))
logger.info("Wrote serialized benchmark results to %s", results_path)
except Exception:
logger.exception("failed to pickle results")
@cli.cmd
def render(pickle_filename: Path):
"""Render (or re-render) the pickled results of a benchmark run."""
unpickled = pickle.loads(Path(pickle_filename).read_bytes())
results.ALL_RUNS = unpickled["runs"]
results.HWINFO = unpickled["hwinfo"]
_print_results()
def _print_results(
    cfg: t.Optional[config.Config] = None,
    all_runs: t.Optional[t.List[benchmarks.Benchmark]] = None,
) -> None:
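    """Print a comparative timing table and, for multi-target runs, plots."""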
all_runs = all_runs or results.ALL_RUNS
grouped = output.GroupedRuns.from_list(all_runs)
if not cfg:
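        # No cfg passed in: borrow one from the first run in the grouping.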
cfg = list(list(grouped.values())[0].values())[0][0].cfg
output.print_comparative_times_table(grouped, config=cfg)
if len(cfg.to_bench) > 1:
output.make_plots(cfg, grouped)
def main():
try:
cli.run()
except Exception:
# Release lockfile if we've got it
if G.lockfile_held:
LOCKFILE_PATH.unlink()
G.lockfile_held = False
logger.debug("shutdown: removed lockfile at %s", LOCKFILE_PATH)
raise
if __name__ == "__main__":
main()