# -*- python -*-
# ex: set syntax=python:
# This is rust-buildbot's buildmaster config file. It must be installed as
# 'master.cfg' in your buildmaster's base directory.
# You will probably need to read the buildbot manual at some length to
# understand what's going on in here. A quick summary follows:
#
# - ChangeSources (GitPoller) run server-side, polling git for changes
# - Schedulers (e.g. SingleBranchScheduler) trigger builds
# - Builders represent work-queues that Schedulers dump BuildRequests into
# - BuildRequests cause a BuildFactory to make Builds
# - Builds get dispatched to Slaves
# - The slave runs the sequence of Steps in the Build
#
# To customize the behavior of a _Step_, we pass _Properties_.
#
# A property is a k=v pair attached to a BuildRequest passing through
# the system. It can be overridden at each stage of processing, but
# since we want to give forced builds (from users) a fair amount of
# flexibility, we try to set our default properties early (in
# Schedulers).
#
# Properties can be set by users, by schedulers, by builders, by
# slaves, and by buildsteps themselves in response to their
# environment.
#
# We often want a mixture of such configuration and control, so we use
# properties for everything. Any step in the BuildFactory that we
# expect to vary (aside from the branch being served) is parameterized
# through properties, and the commands issued in the steps themselves
# are adjusted through IRenderables, doStepIf, and similar
# property-driven customization. Note that this means most variability
# can change _request by request_; if you wire variability in when
# setting up the builder, you'll be stuck always doing the same thing
# in that builder.
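#
# As an illustrative sketch only (the "run-docs" property and the step shown
# here are hypothetical, not part of this config), a property-driven step
# might look like:
#
#   def should_build_docs(step):
#       props = step.build.getProperties()
#       return str(props.getProperty("run-docs", "yes")).lower() \
#           not in ["no", "false", "0"]
#
#   f.addStep(ShellCommand(command=["make", "docs"],
#                          doStepIf=should_build_docs))
#
# The real property-driven helpers used by this config (should_wipe,
# should_check, MakeCommand, ConfigCommand, CommandEnv, ...) are defined
# further down, before the builders.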
| import time | |
| import re | |
| from buildbot.process.buildstep import BuildStep, SUCCESS, FAILURE | |
| from buildbot.status.logfile import STDOUT | |
| # This is the dictionary that the buildmaster pays attention to. We also use | |
| # a shorter alias to save typing. | |
| c = BuildmasterConfig = {} | |
| # from buildbot import manhole | |
| # c['manhole'] = manhole.AuthorizedKeysManhole("tcp:1234:interface=127.0.0.1", "~/.ssh/authorized_keys") | |
| c['changeHorizon'] = 200 | |
| c['buildHorizon'] = 500 | |
| c['eventHorizon'] = 50 | |
| c['logHorizon'] = 500 | |
| c['caches'] = { | |
| 'Changes' : 1000, | |
| 'Builds' : 500, | |
| 'chdicts' : 1000, | |
| 'BuildRequests' : 100, | |
| 'SourceStamps' : 200, | |
| 'ssdicts' : 200, | |
| 'objectids' : 100, | |
| 'usdicts' : 1000, | |
| } | |
| c['logCompressionLimit'] = 16384 | |
| c['logCompressionMethod'] = 'gz' | |
| c['logMaxSize'] = 1024*1024*10 # 10M | |
| c['logMaxTailSize'] = 32768 | |
| WORKDIR = "build" | |
| BUILD_WORKDIR = "build/obj" | |
| ####### Site-specific configuration | |
| keypair_name = 'buildbot-west-slave-key' | |
| security_name = None | |
# The time to wait before shutting down an idle EC2 slave.
# It should be smaller than the diff between the fastest and slowest build,
# to prevent slaves from needing to be restarted while bors's queue is full.
build_wait_timeout = (60*60)*3
| compile_timeout = 30*60 # rustc can take a while to compile | |
| test_timeout = 20*60 | |
| region = 'us-west-1' | |
| master_config = { } | |
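# Site-specific settings are read from "master.cfg.txt", a simple
# whitespace-separated key/value file (one pair per line). The keys are the
# ones consumed just below. For illustration only -- every value here is a
# hypothetical placeholder, not a real endpoint:
#
#   env                      dev
#   master_addy              10.0.0.1
#   git_source               https://github.com/example/rust.git
#   cargo_source             https://github.com/example/cargo.git
#   packaging_source         https://github.com/example/rust-packaging.git
#   s3_addy                  s3://example-dist-bucket
#   s3_cargo_addy            s3://example-cargo-bucket
#   dist_server_addy         https://dist.example.com
#   public_dist_server_addy  https://static.example.com
#   buildbot_source          https://github.com/example/rust-buildbot.git
#   buildbot_branch          master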
| for line in open("master.cfg.txt"): | |
| fields = line.split() | |
| if len(fields) >= 2: | |
| k = fields.pop(0) | |
| v = fields.pop(0) | |
| master_config[k] = v | |
| env = master_config['env'] | |
| master_addy = master_config['master_addy'] | |
| git_source = master_config['git_source'] | |
| cargo_source = master_config['cargo_source'] | |
| packaging_source = master_config['packaging_source'] | |
| s3_addy = master_config['s3_addy'] | |
| s3_cargo_addy = master_config['s3_cargo_addy'] | |
| dist_server_addy = master_config['dist_server_addy'] | |
| public_dist_server_addy = master_config['public_dist_server_addy'] | |
| all_branches = ["auto", "master", "try", "snap-stage3"] | |
| # Production configuration | |
| auto_platforms = [ | |
| "mac-32-opt", | |
| #"mac-32-nopt-c", FIXME #7221 can't fit metedata sections | |
| #"mac-32-nopt-t", | |
| "mac-64-opt", | |
| #"mac-64-nopt-c", | |
| "mac-64-nopt-t", | |
| #"mac-64-opt-vg", | |
| #"mac-all-opt", | |
| "mac-cross-ios-opt", | |
| "mac-64-opt-rustbuild", | |
| "linux-32-opt", | |
| #"linux-32-nopt-c", | |
| "linux-32-nopt-t", | |
| "linux-64-opt", | |
| #"linux-64-nopt-c", | |
| "linux-64-nopt-t", | |
| "linux-64-debug-opt", | |
| "linux-musl-64-opt", | |
| "linux-cross-opt", | |
| "linux-32cross-opt", | |
| "linux-64-opt-rustbuild", | |
| "linux-64-opt-mir", | |
| #"linux-64-opt-vg", | |
| #"linux-all-opt", | |
| "linux-64-x-android-t", | |
| "linux-64-cross-netbsd", | |
| "linux-64-cross-freebsd", | |
| "linux-64-cross-armsf", | |
| "linux-64-cross-armhf", | |
| "win-gnu-32-opt", | |
| #"win-gnu-32-nopt-c", | |
| "win-gnu-32-nopt-t", | |
| "win-gnu-64-opt", | |
| #"win-gnu-64-nopt-c", | |
| "win-gnu-64-nopt-t", | |
| "win-msvc-32-opt", | |
| "win-msvc-64-opt", | |
| "win-msvc-64-opt-mir", | |
| "win-msvc-32-cross-opt", | |
| "win-gnu-32-opt-rustbuild", | |
| "win-msvc-64-opt-rustbuild", | |
| # Bots running check-cargotest | |
| "linux-64-cargotest", | |
| "win-msvc-64-cargotest", | |
| # Tier 2 platforms, also modify nogate_builders | |
| "bitrig-64-opt", | |
| "freebsd10_32-1", | |
| "freebsd10_64-1", | |
| "dragonflybsd-64-opt", | |
| "openbsd-64-opt" | |
| ] | |
| try_platforms = ["linux", "win-gnu-32", "win-gnu-64", "mac"] | |
| snap_platforms = ["linux", "win-gnu-32", "win-gnu-64", "mac", "bitrig-64", | |
| "freebsd10_32-1", "freebsd10_64-1", "dragonflybsd-64-opt", | |
| "openbsd-64-opt"] | |
| dist_platforms = ["linux", "mac", "arm-android", "musl-linux", | |
| "cross-linux", | |
| "cross32-linux", | |
| "cross-host-linux", | |
| "mac-ios", | |
| "win-gnu-32", "win-gnu-64", | |
| "win-msvc-32", "win-msvc-64", | |
| "win-msvc-32-cross"] | |
| packaging_platforms = ["linux", "mac", | |
| "win-gnu-32", "win-gnu-64", | |
| "win-msvc-32", "win-msvc-64"] | |
| cargo_platforms = ["linux-32", "linux-64", "mac-32", "mac-64", | |
| "cross-linux", | |
| "win-gnu-32", "win-gnu-64", | |
| "win-msvc-32", "win-msvc-64", | |
| "bitrig-64"] | |
| cargo_dist_platforms = [p for p in cargo_platforms if "linux" in p or "mac" in p or "win" in p] | |
| def works_in_dev(platform): | |
| return 'linux' in platform or \ | |
| ('bsd' not in platform and 'bitrig' not in platform) | |
| if env != "prod": | |
| auto_platforms = [p for p in auto_platforms if works_in_dev(p)] | |
| try_platforms = [p for p in try_platforms if works_in_dev(p)] | |
| snap_platforms = [p for p in snap_platforms if works_in_dev(p)] | |
| dist_platforms = [p for p in dist_platforms if works_in_dev(p)] | |
| cargo_platforms = [p for p in cargo_platforms if works_in_dev(p)] | |
| packaging_platforms = [p for p in packaging_platforms if works_in_dev(p)] | |
# auto-platforms that won't cause others to fail (these don't gate bors)
| nogate_builders = [ | |
| "auto-bitrig-64-opt", | |
| "auto-freebsd10_32-1", | |
| "auto-freebsd10_64-1", | |
| "auto-dragonflybsd-64-opt", | |
| "auto-openbsd-64-opt", | |
| ] | |
| dist_nogate_platforms = [ | |
| #"mac-ios", | |
| #"cross-host-linux", | |
| #"cross-win", | |
| ] | |
| cargo_cross_targets = [ | |
| 'arm-unknown-linux-gnueabi', | |
| 'arm-unknown-linux-gnueabihf', | |
| 'armv7-unknown-linux-gnueabihf', | |
| 'aarch64-unknown-linux-gnu', | |
| 'x86_64-unknown-freebsd', | |
| 'x86_64-unknown-netbsd', | |
| 'i686-unknown-freebsd', | |
| ] | |
| ios = {'auto': 'cross-ios-opt', 'dist': 'mac-ios'} | |
| lincross = {'auto': 'linux-cross', 'dist': 'cross-linux'} | |
| lincross32 = {'auto': 'linux-32cross', 'dist': 'cross32-linux'} | |
| msvc32 = {'auto': 'msvc-32-cross', 'dist': 'win-msvc-32-cross'} | |
| def xhost(name): | |
| return {'auto': 'linux-64-cross-' + name, 'dist': 'cross-host-linux'} | |
| stable_cross_targets = [ | |
| {'t': 'aarch64-apple-ios', 'b': ios}, | |
| {'t': 'aarch64-unknown-linux-gnu', 'b': lincross}, | |
| {'t': 'arm-unknown-linux-gnueabi', 'b': lincross}, | |
| {'t': 'arm-unknown-linux-gnueabihf', 'b': lincross}, | |
| {'t': 'armv7-apple-ios', 'b': ios}, | |
| {'t': 'armv7-unknown-linux-gnueabihf', 'b': lincross}, | |
| {'t': 'armv7s-apple-ios', 'b': ios}, | |
| {'t': 'i386-apple-ios', 'b': ios}, | |
| {'t': 'mips-unknown-linux-gnu', 'b': lincross}, | |
| {'t': 'mipsel-unknown-linux-gnu', 'b': lincross}, | |
| {'t': 'powerpc-unknown-linux-gnu', 'b': lincross}, | |
| {'t': 'powerpc64-unknown-linux-gnu', 'b': lincross}, | |
| {'t': 'powerpc64le-unknown-linux-gnu', 'b': lincross}, | |
| {'t': 'x86_64-apple-ios', 'b': ios}, | |
| {'t': 'x86_64-rumprun-netbsd', 'b': lincross}, | |
| ] | |
| beta_cross_targets = stable_cross_targets + [ | |
| {'t': 'aarch64-unknown-linux-gnu', 'b': xhost('NOAUTO'), 'host': True, 'pkg': True}, | |
| {'t': 'arm-unknown-linux-gnueabi', 'b': xhost('armsf'), 'host': True, 'pkg': True}, | |
| {'t': 'arm-unknown-linux-gnueabihf', 'b': xhost('armhf'), 'host': True, 'pkg': True}, | |
| {'t': 'armv7-unknown-linux-gnueabihf', 'b': xhost('NOAUTO'), 'host': True, 'pkg': True}, | |
| {'t': 'i586-pc-windows-msvc', 'b': msvc32}, | |
| {'t': 'i686-unknown-freebsd', 'b': xhost('NOAUTO'), 'host': True}, | |
| {'t': 'mips-unknown-linux-musl', 'b': lincross}, | |
| {'t': 'mipsel-unknown-linux-musl', 'b': lincross}, | |
| {'t': 'x86_64-unknown-freebsd', 'b': xhost('freebsd'), 'host': True, 'pkg': True}, | |
| {'t': 'x86_64-unknown-netbsd', 'b': xhost('netbsd'), 'host': True, 'pkg': True}, | |
| ] | |
| nightly_cross_targets = beta_cross_targets + [ | |
| {'t': 'i586-unknown-linux-gnu', 'b': lincross32}, | |
| {'t': 'i686-unknown-linux-musl', 'b': lincross32}, | |
| ] | |
| ####### BUILDSLAVES | |
| # Configuration of --host and --target triples based on the above platform names | |
| def all_platform_hosts(platform): | |
| if "mac" in platform: | |
| return ["i686-apple-darwin", "x86_64-apple-darwin"] | |
| elif "linux-64-x-android" in platform: | |
| return ["i686-unknown-linux-gnu", "x86_64-unknown-linux-gnu"] | |
| elif "arm-android" in platform: | |
| return ["x86_64-unknown-linux-gnu"] | |
| elif "linux" in platform: | |
| return ["i686-unknown-linux-gnu", "x86_64-unknown-linux-gnu"] | |
| elif "freebsd10_32" in platform: | |
| return ["i686-unknown-freebsd"] | |
| elif "freebsd10_64" in platform: | |
| return ["x86_64-unknown-freebsd"] | |
| elif "win-gnu-32" in platform: | |
| return ["i686-pc-windows-gnu"] | |
| elif "win-gnu-64" in platform: | |
| return ["x86_64-pc-windows-gnu"] | |
| elif "win-msvc-32" in platform: | |
| return ["i686-pc-windows-msvc"] | |
| elif "win-msvc-64" in platform: | |
| return ["x86_64-pc-windows-msvc"] | |
| elif "bitrig-64" in platform: | |
| return ["x86_64-unknown-bitrig"] | |
| elif "dragonflybsd-64" in platform: | |
| return ["x86_64-unknown-dragonfly"] | |
| elif "openbsd-64" in platform: | |
| return ["x86_64-unknown-openbsd"] | |
| else: | |
| return None | |
| def auto_platform_host(p): | |
| if "-all" in p: | |
| return "all" | |
| else: | |
| return [auto_platform_triple(p)] | |
| def auto_platform_triple(p): | |
| if "mac" in p: | |
| if "-32" in p: | |
| return "i686-apple-darwin" | |
| else: | |
| return "x86_64-apple-darwin" | |
| if "linux" in p: | |
| if "-32" in p: | |
| return "i686-unknown-linux-gnu" | |
| else: | |
| return "x86_64-unknown-linux-gnu" | |
| if "win" in p: | |
| env = "gnu" if "gnu" in p else "msvc" | |
| if "-32" in p: | |
| return "i686-pc-windows-" + env | |
| else: | |
| return "x86_64-pc-windows-" + env | |
| if "freebsd10" in p: | |
| if "_32" in p: | |
| return "i686-unknown-freebsd" | |
| else: | |
| return "x86_64-unknown-freebsd" | |
| if "bitrig-64" in p: | |
| return "x86_64-unknown-bitrig" | |
| if "dragonflybsd-64" in p: | |
| return "x86_64-unknown-dragonfly" | |
| if "openbsd-64" in p: | |
| return "x86_64-unknown-openbsd" | |
| # No other platform was specified. Probably arm-android. | |
| if "android" in p: | |
| return "x86_64-unknown-linux-gnu" | |
| def auto_platform_build(p): | |
| return auto_platform_triple(p) | |
| ####### BUILDSLAVES | |
| # The 'slaves' list defines the set of recognized buildslaves. Each element is | |
| # a BuildSlave object, specifying a unique slave name and password. The same | |
| # slave name and password must be configured on the slave. | |
| from buildbot.buildslave import BuildSlave | |
| from buildbot.buildslave.ec2 import EC2LatentBuildSlave | |
| snap_slaves = [] | |
| dist_slaves = [] | |
| auto_slaves = [] | |
| ios_slaves = [] | |
| c['slaves'] = [] | |
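# Slaves are read from "slave-list.txt": one slave per line, in the form
#   <name> <password> [key=value ...]
# with '#' lines ignored. The recognized keys are the ones consumed below
# (max_builds, instance_type, ami, docker, snap, dist, ios, special).
# A hypothetical example (names, passwords and AMI ids are made up):
#
#   linux-64-1 s3kr1t ami=ami-0123456789abcdef0 instance_type=m3.xlarge dist=true
#   mac-64-1   hunter2 max_builds=2 snap=true special=true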
| for line in open("slave-list.txt"): | |
| if line.startswith("#"): | |
| continue | |
| fields = line.split() | |
| if len(fields) >= 2: | |
| name = fields.pop(0) | |
| pw = fields.pop(0) | |
| ext = {'max_builds':1, | |
| 'instance_type':'m3.xlarge', | |
| } | |
| for kv in fields: | |
| (k,v) = kv.split('=') | |
| ext[k] = v | |
| if 'ami' in ext: | |
| user_data = "%s %s %s" % (name, pw, master_addy) | |
| if 'docker' in ext: | |
| user_data += "\n" + ext['docker'] | |
| user_data += "\n" + master_config['buildbot_source'] | |
| user_data += "\n" + master_config['buildbot_branch'] | |
| slave = EC2LatentBuildSlave(name, pw, ext['instance_type'], | |
| ami=ext['ami'], | |
| #elastic_ip=ext['elastic_ip'], | |
| user_data=user_data, | |
| region=region, | |
| #subnet_id=subnet_id, | |
| keypair_name=keypair_name, | |
| #security_name=security_name, | |
| security_name="rust-non-vpc-slave", | |
| build_wait_timeout=build_wait_timeout, | |
| # notify_on_missing=['admin@rust-lang.org'], | |
| max_builds=int(ext['max_builds']), | |
| tags = { 'Name': env + "-slave-" + name }) | |
| else: | |
| slave = BuildSlave(name, pw, max_builds=int(ext['max_builds'])) | |
| if 'snap' in ext: | |
| snap_slaves.append(slave) | |
| if 'dist' in ext: | |
| dist_slaves.append(slave) | |
| if 'ios' in ext: | |
| ios_slaves.append(slave) | |
| # "special" slaves are those we are _not_ putting in the auto pool. | |
| if 'special' not in ext: | |
| auto_slaves.append(slave) | |
| c['slaves'].append(slave) | |
| # We listen for slaves only on localhost; there should be an stunnel | |
| # loopback forwarding to here. | |
| c['slavePortnum'] = "tcp:9989:interface=127.0.0.1" | |
| ####### CHANGESOURCES | |
| # the 'change_source' setting tells the buildmaster how it should find out | |
| # about source code changes. | |
| from buildbot.changes.gitpoller import GitPoller | |
| main_sources = [GitPoller(git_source, | |
| workdir='gitpoller-workdir', | |
| branches=all_branches, | |
| pollinterval=60), | |
| GitPoller(cargo_source, | |
| workdir='gitpoller-workdir', | |
| branches=["master", "auto-cargo"], | |
| pollinterval=60)] | |
| c['change_source'] = main_sources | |
| ####### SCHEDULERS | |
| # Configure the Schedulers, which decide how to react to incoming changes. | |
| from buildbot.schedulers.basic import SingleBranchScheduler | |
| from buildbot.schedulers.forcesched import * | |
| from buildbot.schedulers.timed import Nightly | |
| from buildbot.schedulers.triggerable import Triggerable | |
| from buildbot.changes import filter | |
| try_sched = SingleBranchScheduler( | |
| name="try-sched", | |
| change_filter=filter.ChangeFilter(branch='try'), | |
| treeStableTimer=60, | |
| builderNames=["try-" + p for p in try_platforms]) | |
| auto_sched = SingleBranchScheduler( | |
| name="auto-sched", | |
| change_filter=filter.ChangeFilter(#filter_fn=(lambda c: "bors" in c.who), | |
| branch='auto'), | |
| treeStableTimer=60, | |
| builderNames=["auto-" + p for p in auto_platforms]) | |
| snap_sched = SingleBranchScheduler( | |
| name="snap3-sched", | |
| change_filter=filter.ChangeFilter(branch='snap-stage3'), | |
| treeStableTimer=60, | |
| builderNames=["snap3-" + p for p in snap_platforms]) | |
| nightly_dist_rustc_trigger_sched = Nightly( | |
| name="nightly-dist-rustc-trigger-sched", | |
| branch="master", | |
| builderNames=["nightly-dist-rustc-trigger"], | |
| hour=3, | |
| minute=0 | |
| ) | |
| nightly_dist_rustc_sched = Triggerable( | |
| name="nightly-dist-rustc-sched", | |
| builderNames=["nightly-dist-rustc-" + p for p in dist_platforms], | |
| ) | |
| beta_dist_rustc_sched = Triggerable( | |
| name="beta-dist-rustc-sched", | |
| builderNames=["beta-dist-rustc-" + p for p in dist_platforms], | |
| ) | |
| stable_dist_rustc_sched = Triggerable( | |
| name="stable-dist-rustc-sched", | |
| builderNames=["stable-dist-rustc-" + p for p in dist_platforms], | |
| ) | |
| nightly_dist_packaging_sched = Triggerable( | |
| name="nightly-dist-packaging-sched", | |
| builderNames=["nightly-dist-packaging-" + p for p in packaging_platforms]) | |
| beta_dist_packaging_sched = Triggerable( | |
| name="beta-dist-packaging-sched", | |
| builderNames=["beta-dist-packaging-" + p for p in packaging_platforms]) | |
| stable_dist_packaging_sched = Triggerable( | |
| name="stable-dist-packaging-sched", | |
| builderNames=["stable-dist-packaging-" + p for p in packaging_platforms]) | |
| nightly_dist_cargo_trigger_sched = Nightly( | |
| name="nightly-dist-cargo-trigger-sched", | |
| branch="master", | |
| builderNames=["nightly-dist-cargo-trigger"], | |
| hour=2, | |
| minute=0 | |
| ) | |
| nightly_dist_cargo_builders = [] | |
| for p in cargo_dist_platforms: | |
| nightly_dist_cargo_builders.append("nightly-dist-cargo-" + p) | |
| nightly_dist_cargo_sched = Triggerable( | |
| name="nightly-dist-cargo-sched", | |
| builderNames=nightly_dist_cargo_builders, | |
| ) | |
| cargo_sched = SingleBranchScheduler( | |
| name="cargo-sched", | |
| change_filter=filter.ChangeFilter(#filter_fn=(lambda c: "bors" in c.who), | |
| branch='auto-cargo'), | |
| treeStableTimer=60, | |
| builderNames=["cargo-" + p for p in cargo_platforms]) | |
| force_sched = ForceScheduler( | |
| name="force-sched", | |
| builderNames= | |
| ["try-" + p for p in try_platforms] | |
| + ["auto-" + p for p in auto_platforms] | |
| + ["snap3-" + p for p in snap_platforms], | |
| reason=StringParameter(name="reason", label="reason:", default="force build", | |
| required=False, size=10), | |
| branch=StringParameter(name="branch", label="branch:", | |
| required=True, size=10), | |
| revision=StringParameter(name="revision", label="revision:", | |
| required=False, size=10), | |
| # will generate nothing in the form, but revision, repository, | |
| # and project are needed by buildbot scheduling system so we | |
| # need to pass a value ("") | |
| #revision=FixedParameter(name="revision", default=""), | |
| repository=FixedParameter(name="repository", default=""), | |
| project=FixedParameter(name="project", default="") | |
| ) | |
| # Force scheduler for things that use the master branch | |
| master_force_sched = ForceScheduler( | |
| name="master-force-sched", | |
| builderNames= | |
| ["nightly-dist-rustc-" + p for p in dist_platforms] | |
| + ["nightly-dist-rustc-trigger"] | |
| + ["nightly-dist-packaging-" + p for p in packaging_platforms] | |
| + ["beta-dist-packaging-" + p for p in packaging_platforms] | |
| + ["stable-dist-packaging-" + p for p in packaging_platforms] | |
| + nightly_dist_cargo_builders | |
| + ["cargo-" + p for p in cargo_platforms] | |
| + ["nightly-dist-cargo-trigger"], | |
| reason=StringParameter(name="reason", label="reason:", default="force build", | |
| required=False, size=10), | |
| branch=StringParameter(name="branch", label="branch:", default="master", | |
| required=True, size=10), | |
| revision=StringParameter(name="revision", label="revision:", | |
| required=False, size=10), | |
| # will generate nothing in the form, but revision, repository, | |
| # and project are needed by buildbot scheduling system so we | |
| # need to pass a value ("") | |
| #revision=FixedParameter(name="revision", default=""), | |
| repository=FixedParameter(name="repository", default=""), | |
| project=FixedParameter(name="project", default="") | |
| ) | |
| beta_force_sched = ForceScheduler( | |
| name="beta-force-sched", | |
| builderNames= | |
| ["beta-dist-rustc-" + p for p in dist_platforms] | |
| + ["beta-dist-rustc-trigger"], | |
| reason=StringParameter(name="reason", label="reason:", default="force build", | |
| required=False, size=10), | |
| branch=StringParameter(name="branch", label="branch:", default="beta", | |
| required=True, size=10), | |
| revision=StringParameter(name="revision", label="revision:", | |
| required=False, size=10), | |
| # will generate nothing in the form, but revision, repository, | |
| # and project are needed by buildbot scheduling system so we | |
| # need to pass a value ("") | |
| #revision=FixedParameter(name="revision", default=""), | |
| repository=FixedParameter(name="repository", default=""), | |
| project=FixedParameter(name="project", default="") | |
| ) | |
| stable_force_sched = ForceScheduler( | |
| name="stable-force-sched", | |
| builderNames= | |
| ["stable-dist-rustc-" + p for p in dist_platforms] | |
| + ["stable-dist-rustc-trigger"], | |
| reason=StringParameter(name="reason", label="reason:", default="force build", | |
| required=False, size=10), | |
| branch=StringParameter(name="branch", label="branch:", default="stable", | |
| required=True, size=10), | |
| revision=StringParameter(name="revision", label="revision:", | |
| required=False, size=10), | |
| # will generate nothing in the form, but revision, repository, | |
| # and project are needed by buildbot scheduling system so we | |
| # need to pass a value ("") | |
| #revision=FixedParameter(name="revision", default=""), | |
| repository=FixedParameter(name="repository", default=""), | |
| project=FixedParameter(name="project", default="") | |
| ) | |
| c['schedulers'] = [ | |
| try_sched, | |
| auto_sched, | |
| snap_sched, | |
| nightly_dist_rustc_sched, | |
| nightly_dist_packaging_sched, | |
| beta_dist_rustc_sched, | |
| beta_dist_packaging_sched, | |
| stable_dist_rustc_sched, | |
| stable_dist_packaging_sched, | |
| cargo_sched, | |
| nightly_dist_cargo_sched, | |
| force_sched, | |
| master_force_sched, | |
| beta_force_sched, | |
| stable_force_sched | |
| ] | |
| if env == 'prod': | |
| c['schedulers'].append(nightly_dist_rustc_trigger_sched) | |
| c['schedulers'].append(nightly_dist_cargo_trigger_sched) | |
| ####### BUILDERS | |
| # The 'builders' list defines the Builders, which tell Buildbot how to perform a build: | |
| # what steps, and which slaves can execute them. Note that any particular build will | |
| # only take place on one slave. | |
| from buildbot.process.factory import BuildFactory | |
| from buildbot.process.properties import WithProperties, Property | |
| from buildbot.steps.source.git import Git | |
| from buildbot.status.results import SUCCESS, WARNINGS, FAILURE, SKIPPED, \ | |
| EXCEPTION, RETRY, worst_status | |
| from buildbot.steps.shell import ShellCommand, Configure, Compile, Test, SetPropertyFromCommand | |
| from buildbot.steps.transfer import FileUpload, DirectoryUpload | |
| from buildbot.steps.master import MasterShellCommand | |
| from buildbot.steps.slave import RemoveDirectory | |
| from buildbot.steps.trigger import Trigger | |
| from buildbot.config import BuilderConfig | |
| from buildbot.interfaces import IRenderable | |
| from zope.interface import implements | |
| import re, os | |
| def props_has_negative_key(props, keyname): | |
| if keyname in props: | |
| if str(props[keyname]).lower() in ["no", "false", "0"]: | |
| return True | |
| return False | |
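# Example of the convention (the value is hypothetical user input): a build
# request carrying check=no makes should_check() below return False, and
# ConfigCommand turns any listed configure option whose property is set to
# "no"/"false"/"0" into "--disable-<opt>" rather than "--enable-<opt>".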
| # property-based doStepIf helper | |
| def should_wipe(step): | |
| # Wipe isn't working right now | |
| return False | |
| props = step.build.getProperties() | |
| if props.has_key("wipe"): | |
| return True | |
| if props.has_key("freshconfig"): | |
| return props["freshconfig"] == "0" | |
| return False | |
# property-based doStepIf helper
def should_clean_llvm(step):
    props = step.build.getProperties()
    if props.has_key("clean-llvm"):
        return True
    return False
| # property-based doStepIf helper | |
| def should_check(step): | |
| props = step.build.getProperties() | |
| return not props_has_negative_key(props, "check") | |
| # property-based IRenderable helper | |
| class MakeCommand(object): | |
| implements(IRenderable) | |
| def getRenderingFor(self, props): | |
| if "buildername" in props: | |
| if "linux" in props["buildername"]: | |
| return "make" | |
| if "bsd" in props["buildername"] or "bitrig" in props["buildername"]: | |
| return "gmake" | |
| if "win" in props["buildername"]: | |
| # Put all processes in a job object to ensure that everything | |
| # dies in one unit. | |
| # | |
| # https://github.com/alexcrichton/rustjob | |
| return ["c:/rustjob.exe", "make"] | |
| return "make" | |
| # property-based IRenderable helper | |
| class CheckCommand(object): | |
| implements(IRenderable) | |
| def getRenderingFor(self, props): | |
| mk = MakeCommand().getRenderingFor(props) | |
| chk = "check" | |
| if "check" in props: | |
| chk = str(props["check"]) | |
| if "buildername" in props and \ | |
| "bsd" in props["buildername"] and \ | |
| chk in ["lite", "check-lite", "check", | |
| "yes", "True"]: | |
| chk = "check-lite" | |
| if re.search("^check(-[a-zA-Z0-9-_\.]+)?$", chk): | |
| return [mk, chk] | |
| return [mk, "check"] | |
| # property-based IRenderable helper | |
| class TestCommand(object): | |
| implements(IRenderable) | |
| def getRenderingFor(self, props): | |
| mk = MakeCommand().getRenderingFor(props) | |
| return [mk, "test"] | |
| # property-based IRenderable helper | |
| class CommandEnv(object): | |
| implements(IRenderable) | |
| def getRenderingFor(self, props): | |
| env = {"RUST_LOG": "rustc=error"} | |
| # Let us turn up ulimit on the bots | |
| env["ALLOW_NONZERO_RLIMIT_CORE"] = "1" | |
| # Used by rust-packaging | |
| env["RUST_DIST_SERVER"] = dist_server_addy | |
| # We want to build 10.7 compatible binaries on OSX, and this is largely | |
| # information to the linker about what libraries it can link and such. | |
| env["MACOSX_DEPLOYMENT_TARGET"] = "10.7" | |
| if "buildername" in props: | |
| win32toolchain = r"c:\mingw-w64\i686-4.9.1-win32-dwarf-rt_v3-rev1\mingw32\bin" | |
| win64toolchain = r"c:\mingw-w64\x86_64-4.9.1-win32-seh-rt_v3-rev1\mingw64\bin" | |
| python = r"c:\Python27" | |
| mingw32 = r"c:\msys64\mingw32\bin;c:\msys64\usr\bin" | |
| mingw64 = r"c:\msys64\mingw64\bin;c:\msys64\usr\bin" | |
| name = props["buildername"] | |
| # This is required to trigger certain workarounds done | |
| # slave-side by buildbot. In particular omitting the PWD | |
| # variable with an unmangled pathname. | |
| # | |
| # Note that the PATH management here is very particular, notably: | |
| # | |
| # * The gcc toolchain is first in PATH. We use a custom toolchain | |
| # rather than one installed by MinGW for a few reasons. We're | |
| # going to be shipping rustc binaries produced by this toolchain, | |
| # notably LLVM is compiled and will require libstdc++-6.dll | |
| # somehow. We want to use the win32 threading model as it doesn't | |
| # have an extra dependency, and the MinGW default toolchains all | |
| # have the pthread threading model. Also LLVM will segfault at | |
| # runtime if compiled by gcc 5+ currently, so we need to stick | |
| # back to 4.9.1 ... Not fun. | |
| # | |
| # * Next up is the system python we have installed. The MinGW python | |
| # is super weird in a few ways. First, it will implicitly insert | |
| # an entry in PATH to `/mingw32/bin` *first* whenever it runs, so | |
| # if a program relies on the order of PATH (like make-win-dist.py) | |
| # then we'll do the wrong thing. Second, the LLVM cmake requires | |
| # python, and it requires it to basically not worry about | |
| # msys-vs-windows paths, and the MinGW python likes to take | |
| # control of paths quickly. To complicate matters, there are | |
| # **two** python installations in MinGW, both of which end up just | |
| # being totally wrong for our use case. As a result we just | |
| # installed a local copy of python and called it a day. | |
| # | |
| # * Finally we add the mingw paths and then the system PATH to | |
| # ensure that we can find all the necessary tools as part of the | |
| # build. | |
| # | |
| # Note that MSVC and MinGW share this same block. MSVC just won't | |
| # use the gcc toolchain and in theory doesn't need the MinGW | |
| # programs, but currently we use `make` everywhere so it definitely | |
| # needs them... | |
| if "win-msvc-32" in name or "win-gnu-32" in name: | |
| env["MACHTYPE"] = "i686-pc-win32" | |
| env["MSYSTEM"] = "MINGW32" | |
| env["PATH"] = ';'.join([win32toolchain, python, mingw32, '${PATH}']) | |
| if "win-msvc-64" in name or "win-gnu-64" in name: | |
| env["MACHTYPE"] = "x86_64-pc-win32" | |
| env["MSYSTEM"] = "MINGW64" | |
| env["PATH"] = ';'.join([win64toolchain, python, mingw64, '${PATH}']) | |
| if "msvc" in props["buildername"]: | |
| env["MSVC"] = "1" | |
| if "valgrind" in props and props["valgrind"] == True: | |
| env["RUST_THREADS"]="1" | |
| env["RUST_RT_TEST_THREADS"]="1" | |
| if "verbose" in props and props["verbose"] == True: | |
| env["VERBOSE"]="1" | |
| #if "android" in props and props["android"] == True: | |
| if "buildername" in props and "android" in props["buildername"]: | |
| path = os.environ["PATH"] | |
| path += ":/android/sdk/platform-tools/" | |
| path += ":/android/sdk/tools/" | |
| env["PATH"] = path | |
| env["NO_BENCH"] = "1" | |
| elif "buildername" in props and "linux" in props["buildername"]: | |
| path = os.environ["PATH"] | |
| path = "/rustroot/bin:" + path | |
| path = "/opt/gcc/bin:" + path | |
| env["PATH"] = path | |
| ld_path = "/rustroot/lib" | |
| ld_path = "/rustroot/lib64:" + ld_path | |
| ld_path = "/opt/gcc/lib64:" + ld_path | |
| ld_path = "/opt/gcc/lib32:" + ld_path | |
| env["LD_LIBRARY_PATH"] = ld_path | |
| env["CARGO_HOME"] = "${PWD}/slave/" + props["buildername"] + "/cargo-home" | |
| if "cargo" in props: | |
| env["PLATFORM"] = props["platform"] | |
| env["BITS"] = props["bits"] | |
| return env | |
| # property-based IRenderable helper | |
| class ConfigCommand(object): | |
| implements(IRenderable) | |
| def getRenderingFor(self, props): | |
| opts=["sharedstd", "valgrind", "helgrind", | |
| "docs", "optimize", "optimize-tests", | |
| "optimize-cxx", "optimize-llvm", | |
| "debug", "debug-assertions", "pax-flags", "clang", | |
| "inject-std-version", "llvm-static-stdcpp", | |
| "rustbuild", "orbit", "nightly"] | |
| if "cargo" in props: | |
| s = "./configure --local-rust-root=$PWD/rustc" | |
| else: | |
| s = "../configure" | |
| for opt in opts: | |
| if opt in props: | |
| if props[opt] is not None: | |
| if props_has_negative_key(props, opt): | |
| s += " --disable-" + opt | |
| else: | |
| s += " --enable-" + opt | |
| if "musl" in props and props["musl"]: | |
| s += " --musl-root=" + props["musl"] | |
| testing_android = "android" in props and props["android"] == True | |
| # Set up the path to the android NDK | |
| if testing_android: | |
| s += " --arm-linux-androideabi-ndk=/android/ndk-arm-18" | |
| s += " --i686-linux-android-ndk=/android/ndk-x86" | |
| s += " --aarch64-linux-android-ndk=/android/ndk-aarch64" | |
| s += " --disable-docs" | |
| if "release-channel" in props: | |
| s += " --release-channel=" + props["release-channel"] | |
| # Our release builders build multiple hosts at once. | |
| # This tells them to only include a single target in each artifact. | |
| s += " --enable-dist-host-only" | |
| if "build" in props: | |
| if re.search("^[a-zA-Z0-9-_\.]+$", props["build"]): | |
| s += " --build=" + props["build"] | |
| if "hosts" in props: | |
| if props["hosts"] == "all" and "platform" in props: | |
| hosts = all_platform_hosts(props["platform"]) | |
| else: | |
| hosts = [t for t in props["hosts"] \ | |
| if re.search("^[a-zA-Z0-9-_\.]+$", str(t))] | |
| if hosts: | |
| s += " --host=" + ",".join(hosts) | |
| # --target is configured specially for android/musl | |
| if "targets" in props: | |
| targets = [t for t in props["targets"] \ | |
| if re.search("^[a-zA-Z0-9-_\.]+$", str(t))] | |
| if targets: | |
| s += " --target=" + ",".join(targets) | |
| # This works around a bug (that I don't know the cause of) where | |
| # $PWD is set to a windows-style path, causing Rust's configure script | |
| # to see the source dir as a windows-style path, then using that path | |
| # to call LLVM's configure script, which pukes on the windows-style | |
| # value of $0 by failing to generate AsmPrinters. Calling cd -P | |
| # sets $PWD properly. | |
| if "buildername" in props and "win" in props["buildername"]: | |
| s = "(cd -P . && " + s + ")" | |
| return ['sh', '-c', s] | |
# Checks whether all dist artifacts for all platforms have been uploaded from
# the slaves to the buildmaster and have matching commit ids
| class DistSync(BuildStep): | |
| stagingDir = None | |
| platforms = [] | |
| nogate_platforms = [] | |
| def __init__(self, stagingDir=None, platforms=[], nogate_platforms=[], **kwargs): | |
| BuildStep.__init__(self, **kwargs) | |
| self.stagingDir = stagingDir | |
| self.platforms = platforms | |
| self.nogate_platforms = nogate_platforms | |
| def start(self): | |
| self.log = self.addLog('log') | |
| if self.all_dist_builds_done(): | |
| self.finished(SUCCESS) | |
| else: | |
| self.finished(FAILURE) | |
| self.step_status.setText(self.describe(done=True)) | |
| self.log.finish() | |
| def all_dist_builds_done(self): | |
| self.log.addEntry(STDOUT, 'checking commit ids of builds\n') | |
| all_done = True | |
| consensus_commit_id = None | |
| for p in self.platforms: | |
| plat_dir = self.stagingDir + "/" + p | |
| self.log.addEntry(STDOUT, 'checking ' + plat_dir + '\n') | |
| commit_id_file = plat_dir + "/commit-id" | |
| try: | |
| with open(commit_id_file, 'r') as f: | |
| if p in self.nogate_platforms: | |
| continue | |
| commit_id = f.read() | |
| self.log.addEntry(STDOUT, 'commit-id: ' + commit_id + '\n') | |
| if consensus_commit_id: | |
| all_done = all_done and commit_id == consensus_commit_id | |
| else: | |
| consensus_commit_id = commit_id | |
| except IOError: | |
| if p in self.nogate_platforms: | |
| self.log.addEntry(STDOUT, 'no commit-id, but making dummy file\n') | |
| if not os.path.exists(plat_dir): | |
| os.makedirs(plat_dir) | |
| open(plat_dir + "/nogate-failed-build", "w") | |
| else: | |
| self.log.addEntry(STDOUT, 'no commit-id\n') | |
| all_done = False | |
| return all_done | |
# Runs a function on the master and sets a property to its return value
| class SetPropertyFromFn(BuildStep): | |
| property = None | |
| extract_fn = None | |
| def __init__(self, property=None, extract_fn=None, **kwargs): | |
| BuildStep.__init__(self, **kwargs) | |
| self.property = property | |
| self.extract_fn = extract_fn | |
| def start(self): | |
| self.log = self.addLog('log') | |
| prop_val = self.extract_fn() | |
| properties = self.build.getProperties() | |
| properties.setProperty(self.property, prop_val, 'SetPropertyFromFn', runtime=True) | |
| self.step_status.setText(self.describe(done=True)) | |
| self.log.finish() | |
| self.finished(SUCCESS) | |
| def grab_slave_filename(rc, out, err): | |
| for line in out.split('\n'): | |
| if re.search("^[a-zA-Z0-9-_\.]+$", line): | |
| return {"slave_filename": line} | |
| return {"slave_filename": None} | |
| def basic_buildfactory(): | |
| global git_source | |
| f = BuildFactory() | |
| # We wipe if the user requests it or if we can't find a config.stamp | |
| # that's been touched in the past 2 days -- suggests config is failing | |
| findcmd = "/usr/bin/find . -maxdepth 2 -name config.stamp -ctime -2 | wc -l" | |
| f.addStep(SetPropertyFromCommand(command=["sh", "-c", findcmd], | |
| property="freshconfig", | |
| workdir=WORKDIR)) | |
| f.addStep(RemoveDirectory(WORKDIR, | |
| doStepIf=should_wipe)) | |
| f.addStep(Git(repourl=git_source, | |
| progress=True, | |
| #clobberOnFailure=True, | |
| retry=(5, 2), # Combat the flakiness. 2 retries, 5 seconds | |
| retryFetch=True, # Combat the flakiness | |
| mode='full', # do what `method` says below | |
| method='clean', # run `git clean -d -f -f` | |
| workdir=WORKDIR)) | |
| return f | |
| def checkout_and_configure_buildfactory(checking_android): | |
| f = basic_buildfactory() | |
| if checking_android: | |
| # Restart Android emu and wait for it | |
| f.addStep(ShellCommand(env=CommandEnv(), | |
| command=["killall", "-9", "emulator-arm"], | |
| flunkOnFailure=False)) | |
| cmd = "nohup nohup " + \ | |
| "emulator @arm-18 -no-window -partition-size 2047 " + \ | |
| "0<&- &>/dev/null &" | |
| f.addStep(ShellCommand(env=CommandEnv(),command=["bash", "-c", cmd])) | |
| f.addStep(ShellCommand(env=CommandEnv(),command=["adb", "wait-for-device"], | |
| flunkOnFailure=False)) | |
| f.addStep(Configure(env=CommandEnv(), | |
| haltOnFailure=True, | |
| flunkOnFailure=True, | |
| command=ConfigCommand(), | |
| interruptSignal="TERM", | |
| workdir=BUILD_WORKDIR)) | |
| return f | |
| def just_tidy_buildfactory(): | |
| f = checkout_and_configure_buildfactory(False) | |
| f.addStep(Compile(env=CommandEnv(), | |
| name="tidy", | |
| description="make tidy", | |
| descriptionDone="tidy", | |
| interruptSignal="TERM", | |
| workdir=BUILD_WORKDIR, | |
| command=[MakeCommand(), "tidy"])) | |
| return f | |
| def make_and_check_buildfactory(check, android, windows, parallel): | |
| checking_android = android == True and check != False | |
| f = checkout_and_configure_buildfactory(checking_android) | |
| # Temporary until we convince the build system | |
| # to handle 0.x 0.x+1 co-occurrence in workspace | |
| f.addStep(Compile(env=CommandEnv(), | |
| name="clean", | |
| description="make clean", | |
| descriptionDone="cleaned", | |
| workdir=BUILD_WORKDIR, | |
| command=[MakeCommand(), "clean"])) | |
| f.addStep(Compile(env=CommandEnv(), | |
| name="clean-llvm", | |
| doStepIf=should_clean_llvm, | |
| description="make clean-llvm", | |
| descriptionDone="cleaned llvm", | |
| interruptSignal="TERM", | |
| workdir=BUILD_WORKDIR, | |
| command=[MakeCommand(), "clean-llvm"])) | |
# msys make is pretty buggy with parallelization
| if windows or not parallel: | |
| command = [MakeCommand()] | |
| else: | |
| command = [MakeCommand(), "-j2"] | |
| f.addStep(Compile(env=CommandEnv(), | |
| haltOnFailure=True, | |
| flunkOnFailure=True, | |
| timeout=compile_timeout, | |
| interruptSignal="TERM", | |
| command=command, | |
| workdir=BUILD_WORKDIR)) | |
| f.addStep(Test(env=CommandEnv(), | |
| doStepIf=check != False, | |
| haltOnFailure=True, | |
| flunkOnFailure=True, | |
| interruptSignal="TERM", | |
| command=CheckCommand(), | |
| timeout=test_timeout, | |
| workdir=BUILD_WORKDIR)) | |
| return f | |
| def cargo_basic_buildfactory(): | |
| global cargo_source | |
| f = BuildFactory() | |
# Wipe the build directory if the user requests it (unlike basic_buildfactory,
# there is no config.stamp freshness check here)
| f.addStep(RemoveDirectory("build", | |
| doStepIf=should_wipe)) | |
| f.addStep(Git(repourl=cargo_source, | |
| progress=True, | |
| #clobberOnFailure=True, | |
| retry=(5, 2), # Combat the flakiness. 2 retries, 5 seconds | |
| retryFetch=True, # Combat the flakiness | |
| mode='full', # do what `method` says below | |
| method='clean', # run `git clean -d -f -f` | |
| submodules=True)) | |
| return f | |
| def cargo_buildfactory(p): | |
| f = cargo_basic_buildfactory() | |
| f.addStep(ShellCommand(env=CommandEnv(), | |
| command=["sh", ".travis.install.deps.sh"], | |
| workdir=WORKDIR)) | |
| f.addStep(Configure(env=CommandEnv(), | |
| haltOnFailure=True, | |
| flunkOnFailure=True, | |
| command=ConfigCommand(), | |
| interruptSignal="TERM", | |
| workdir=WORKDIR)) | |
| f.addStep(Compile(env=CommandEnv(), | |
| name="clean", | |
| description="make clean-all", | |
| descriptionDone="cleaned", | |
| workdir=WORKDIR, | |
| command=[MakeCommand(), "clean-all"])) | |
| f.addStep(Compile(env=CommandEnv(), | |
| haltOnFailure=True, | |
| flunkOnFailure=True, | |
| interruptSignal="TERM", | |
| command=[MakeCommand()], | |
| workdir=WORKDIR)) | |
| f.addStep(Test(env=CommandEnv(), | |
| doStepIf="cross" not in p, | |
| haltOnFailure=True, | |
| flunkOnFailure=True, | |
| interruptSignal="TERM", | |
| command=TestCommand(), | |
| timeout=test_timeout, | |
| workdir=WORKDIR)) | |
| return f | |
| def cargo_nightly_buildfactory(platform, hosts): | |
| global s3_cargo_addy | |
| f = cargo_buildfactory(platform) | |
| cmd = "distcheck" | |
| if "cross" in platform: | |
| cmd = "dist" | |
| f.addStep(Compile(env=CommandEnv(), | |
| name=cmd, | |
| description="make " + cmd, | |
| descriptionDone="installed", | |
| workdir=WORKDIR, | |
| command=[MakeCommand(), cmd])) | |
| local_dist_dir = "tmp/dist/cargo-nightly" | |
| for host in hosts: | |
| local_dist_platform_dir = local_dist_dir + "/" + host | |
| # Delete local dist dir | |
| rm_dist_cmd = "rm -rf " + local_dist_platform_dir | |
| f.addStep(MasterShellCommand(name="rm dist dir", | |
| command=["sh", "-c", rm_dist_cmd])) | |
| tarball = "cargo-nightly-" + host + ".tar.gz" | |
| f.addStep(FileUpload(slavesrc="target/" + host + "/release/dist/" + tarball, | |
| masterdest=local_dist_platform_dir + "/" + tarball, | |
| workdir=WORKDIR)) | |
| commit_id_cmd = "echo '%(got_revision)s' > " + local_dist_platform_dir + "/commit-id" | |
| f.addStep(MasterShellCommand(name="stamp commit id", | |
| command=["sh", "-c", WithProperties(commit_id_cmd)])) | |
| all_cargo_hosts = cargo_cross_targets[:] | |
| for p in cargo_dist_platforms: | |
| for h in all_platform_hosts(p): | |
| all_cargo_hosts += [h] | |
| all_cargo_hosts = list(set(all_cargo_hosts)) | |
| f.addStep(DistSync(name="checking for synced cargo dist builds", | |
| stagingDir=local_dist_dir, | |
| platforms=all_cargo_hosts, | |
| nogate_platforms=[], | |
| haltOnFailure=True, | |
| flunkOnFailure=False)) | |
| return finish_dist(f, local_dist_dir, all_cargo_hosts, s3_cargo_addy, "cargo-dist", "cargo", "nightly", True) | |
| def snap3_buildfactory(platform): | |
| global s3_addy | |
| f = make_and_check_buildfactory(True, False, "win" in platform, False) | |
| hosts = all_platform_hosts(platform) | |
| if hosts != None: | |
| for host in hosts: | |
| f.addStep(Compile(env=CommandEnv(), | |
| name="make-snap-stage3", | |
| command=[MakeCommand(), | |
| "snap-stage3-H-" + host], | |
| haltOnFailure=True, | |
| flunkOnFailure=True, | |
| interruptSignal="TERM", | |
| workdir=BUILD_WORKDIR)) | |
| f.addStep(SetPropertyFromCommand(command=["sh", "-c", "ls rust-stage0-*.tar.bz2"], | |
| haltOnFailure=True, | |
| flunkOnFailure=True, | |
| extract_fn=grab_slave_filename, | |
| workdir=BUILD_WORKDIR)) | |
| f.addStep(FileUpload(slavesrc=WithProperties("%(slave_filename:-none)s"), | |
| masterdest=WithProperties("tmp/%(slave_filename:-none)s"), | |
| haltOnFailure=True, | |
| flunkOnFailure=True, | |
| workdir=BUILD_WORKDIR)) | |
| s3cmd = WithProperties("s3cmd put -P tmp/%(slave_filename:-none)s " | |
| + s3_addy + "/stage0-snapshots/") | |
| f.addStep(MasterShellCommand(name="s3-upload", | |
| command=["sh", "-c", s3cmd], | |
| haltOnFailure=True, | |
| flunkOnFailure=True)) | |
| f.addStep(ShellCommand(command=["rm", | |
| WithProperties("%(slave_filename:-none)s")], | |
| workdir=BUILD_WORKDIR)) | |
| f.addStep(MasterShellCommand(command=["rm", | |
| WithProperties("tmp/%(slave_filename:-none)s")])) | |
| return f | |
| # The only purpose of this buildfactory is to trigger the nightly | |
| # builders all on the same revision | |
| def rust_distsnap_trigger_buildfactory(scheduler_names): | |
# Need to check out the source to get the revision (I think)
| f = basic_buildfactory() | |
| f.addStep(Trigger(schedulerNames=scheduler_names)) | |
| return f | |
| def cargo_distsnap_trigger_buildfactory(scheduler_names): | |
# Need to check out the source to get the revision (I think)
| f = cargo_basic_buildfactory() | |
| f.addStep(Trigger(schedulerNames=scheduler_names)) | |
| return f | |
| def distsnap_buildfactory(platform, channel_label): | |
| global s3_addy | |
| command = [MakeCommand()] | |
| if 'android' in platform: | |
| command.append('dist') | |
| elif 'musl' in platform: | |
| command.append("check-stage2-T-x86_64-unknown-linux-musl-" + \ | |
| "H-x86_64-unknown-linux-gnu") | |
| command.append('dist') | |
| elif 'cross' in platform: | |
| command.append('dist') | |
| else: | |
| command.append('distcheck') | |
| # Don't run check because distcheck is going to do it all again | |
| f = make_and_check_buildfactory(False, False, "win" in platform, False) | |
| f.addStep(Compile(env=CommandEnv(), | |
| name='distcheck', | |
| command=command, | |
| timeout=compile_timeout, | |
| interruptSignal="TERM", | |
| workdir=BUILD_WORKDIR)) | |
| # Artifacts from each channel go in their own directory | |
| local_dist_dir = "tmp/dist/rustc-" + channel_label | |
| # Artifacts from each platform go in their own dir since multiple | |
| # platforms may produce artifacts with the same name. We'll | |
| # combine them all right before the final upload. | |
| local_dist_platform_dir = local_dist_dir + "/" + platform | |
| # Delete local dist dir for this platform | |
| rm_dist_cmd = "rm -rf " + local_dist_platform_dir + "/*" | |
| f.addStep(MasterShellCommand(name="rm dist dir", | |
| command=["sh", "-c", rm_dist_cmd])) | |
| # Upload artifacts from slave | |
| slave_dist_dir = "dist" | |
| if 'cross-host-linux' in platform: | |
| slave_dist_dir = "build/dist" | |
| f.addStep(DirectoryUpload(slavesrc=slave_dist_dir, | |
| masterdest=local_dist_platform_dir, | |
| workdir=BUILD_WORKDIR)) | |
| # All remaining steps happen on the buildmaster | |
| # If we're not packaging as well then this platform is just producing std or | |
| # cross artifacts, so delete everything related to the host platform. | |
| if platform not in packaging_platforms: | |
| for host in all_platform_hosts(platform): | |
| # `find`, not `ls`, because we need the full path to what we'll remove | |
| clean_cmd = "find " + local_dist_platform_dir + \ | |
| "/*.gz | grep " + host + " | xargs rm -f" | |
| f.addStep(MasterShellCommand(name="remove " + host + " artifacts", | |
| command=["sh", "-c", clean_cmd])) | |
| # Add the commit-id file to indicate which commit this is | |
| # for. This will be checked by the DistSync buildstep later to | |
| # determine when to upload everything at once. | |
| commit_id_cmd = "echo '%(got_revision)s' > " + local_dist_platform_dir + "/commit-id" | |
| f.addStep(MasterShellCommand(name="stamp commit id", | |
| command=["sh", "-c", WithProperties(commit_id_cmd)])) | |
| # Check whether dists from all builders have finished by examining the commit-id file | |
| f.addStep(DistSync(name="checking for synced dist snap builds", | |
| stagingDir=local_dist_dir, | |
| platforms=dist_platforms, | |
| nogate_platforms=dist_nogate_platforms, | |
| haltOnFailure=True, | |
| flunkOnFailure=False)) | |
| # Because the above check that all platforms have finished is | |
| # haltOnFailure but not flunkOnFailure, all builds but the last | |
| # will stop here, successfully. The final build will proceed with | |
| # all the work of consolidating and uploading the artifacts. | |
| # These next few steps deal with uploading docs. We're going to | |
| # take the docs from the linux builder, put them on s3, then | |
| # delete the docs before uploading the rest of the dist artifacts. | |
| linux_doc_dir = local_dist_dir + "/linux/doc" | |
| # Figure out the 'package name' for subsequent steps. Package name | |
| # is something like 'nightly', 'beta', or '1.0.0', basically | |
| # either a channel name or a version, depending on how the | |
| # makefiles configure a given channel. | |
| def grab_package_name(doc_dir): | |
| from os import listdir | |
| from os.path import isdir | |
| for f in listdir(doc_dir): | |
| if isdir(doc_dir + "/" + f): | |
| return f | |
| raise Exception("unable to grab package name") | |
| f.addStep(SetPropertyFromFn(name="extracting package name", | |
| property="package_name", | |
| extract_fn=lambda : grab_package_name(linux_doc_dir))) | |
| # This is exactly like the previous step of grabbing | |
| # `package_name` but substituting 'nightly' with 'master' - for | |
| # legacy reasons nightly docs are uploaded to 'master'. | |
| # FIXME #17398 don't do this | |
| def grab_doc_package_name(doc_dir): | |
| name = grab_package_name(doc_dir) | |
| if name == 'nightly': | |
| return 'master' | |
| else: | |
| return name | |
| f.addStep(SetPropertyFromFn(name="extracting doc package name", | |
| property="doc_package_name", | |
| extract_fn=lambda : grab_doc_package_name(linux_doc_dir))) | |
| # Sync the doc folder from the 'linux' staging dir | |
| s3cmd = WithProperties("s3cmd sync -P --no-progress --delete-removed " + \ | |
| linux_doc_dir + "/%(package_name)s/ " + \ | |
| s3_addy + "/doc/%(doc_package_name)s/",) | |
| f.addStep(MasterShellCommand(name="upload docs", | |
| command=["sh", "-c", s3cmd])) | |
| # Delete the doc folders from all the dist_platform directories | |
| all_doc_dirs = [(local_dist_dir + "/" + p + "/doc") for p in dist_platforms] | |
| rm_doc_dirs_cmd = "rm -Rf " + " ".join(all_doc_dirs) | |
| f.addStep(MasterShellCommand(name="rm doc dirs", | |
| command=["sh", "-c", rm_doc_dirs_cmd])) | |
| # We want our source tarballs to come from the linux bot. Delete any others | |
| non_linux_platforms = [p for p in dist_platforms if p != 'linux'] | |
| non_linux_src_tarballs = [(local_dist_dir + "/" + p + "/rustc-%(package_name)s-src.tar.gz") | |
| for p in non_linux_platforms] | |
| rm_src_tarballs_cmd = "rm -Rf " + " ".join(non_linux_src_tarballs) | |
| f.addStep(MasterShellCommand(name="rm non-linux src tarballs", | |
| command=["sh", "-c", WithProperties(rm_src_tarballs_cmd)])) | |
| # Upload everything that's left | |
| f = finish_dist(f, local_dist_dir, dist_platforms, s3_addy, "dist", "rustc", channel_label, True) | |
| # Finally, trigger the packaging build | |
| f.addStep(Trigger(schedulerNames=[channel_label + "-dist-packaging-sched"], | |
| alwaysUseLatest=True | |
| )) | |
| return f | |
| def finish_dist(f, local_dist_dir, dist_subdirs, s3_addy, remote_dist_dir, component, channel, upload): | |
| # The archive date used for this publication to the dist server, YYYY-MM-DD | |
| f.addStep(SetPropertyFromFn(name="getting today's date", | |
| property="archive_date", | |
| extract_fn=lambda : time.strftime("%Y-%m-%d"))) | |
| # Delete the commit-id files | |
| all_commit_ids = [(local_dist_dir + "/" + p + "/commit-id") for p in dist_subdirs] | |
| all_commit_ids += [(local_dist_dir + "/" + p + "/nogate-failed-build") for p in dist_subdirs] | |
| rm_commit_ids_cmd = "rm -f " + " ".join(all_commit_ids) | |
| f.addStep(MasterShellCommand(name="rm commit-id", | |
| command=["sh", "-c", rm_commit_ids_cmd])) | |
| # Consolidate everything that's left under one directory for final upload. There | |
| # should be no duplicate artifacts across platforms at this point. | |
| final_dist_dir = local_dist_dir + "/final" | |
| all_dist_dirs = [(local_dist_dir + "/" + p) for p in dist_subdirs] | |
| mkdir_final_cmd = "mkdir -p " + final_dist_dir | |
| mv_final_cmd = "find " + " ".join(all_dist_dirs) + " -type f | xargs cp -t " + final_dist_dir + "/" | |
| consolidate_cmd = mkdir_final_cmd + " && " + mv_final_cmd | |
| f.addStep(MasterShellCommand(name="consolidate artifacts", | |
| command=["sh", "-c", WithProperties(consolidate_cmd)])) | |
| # Build the channel manifest v1 | |
| manifest_name = "channel-" + component + "-" + channel | |
| manifest_cmd = "ls " + final_dist_dir + " > " + manifest_name + " && mv " + manifest_name + " " + final_dist_dir + "/" | |
| f.addStep(MasterShellCommand(name="manifesting", | |
| command=["sh", "-c", WithProperties(manifest_cmd)])) | |
| # Build the channel manifest v2, which is a toml file describing the components | |
| # of Rust. This only works for the 'rust' component, which contains various | |
| # artifacts of other builds. It depends on the v1 manifests and channel build | |
| # date of the 'cargo' and 'rustc' channels. | |
| # | |
# NB: This is run under a shell command instead of by importing
# the module, in the hope that MasterShellCommand is automatically
# asynchronous.
| if component == "rust": | |
| manifest_temp_dir = "tmp/dist/manifest-tmp-" + channel | |
| # FIXME: This is a poor way to find the build-rust-manifest script | |
| manifest_v2_cmd = ("python $HOME/rust-buildbot/master/build-rust-manifest.py " + | |
| channel + " " + | |
| "%(archive_date)s " + | |
| dist_server_addy + " " + | |
| public_dist_server_addy + " " + | |
| final_dist_dir + " " + | |
| manifest_temp_dir) | |
| f.addStep(MasterShellCommand(name="writing v2 manifest", | |
| command=["sh", "-c", WithProperties(manifest_v2_cmd)])) | |
| # Write channel date. | |
| channel_date_cmd = "echo %(archive_date)s > " + final_dist_dir + "/channel-" + component + "-" + channel + "-date.txt" | |
| f.addStep(MasterShellCommand(name="writing channel build date", | |
| command=["sh", "-c", WithProperties(channel_date_cmd)] )) | |
| # Generate signatures | |
| # | |
| # FIXME: Sigs should be generated after sums. That way it would be possible to | |
| # validate artifacts without downloading them, as long as the checksums match. | |
# build-rust-manifest could use this to validate the sums downloaded over http.
| sign_cmd = "for i in " + final_dist_dir + "/* ; do gpg --no-tty --yes --passphrase-fd 0 -a --detach-sign $i < ../rust-bot-sign-passphrase; done" | |
| f.addStep(MasterShellCommand(name="signing", | |
| command=["sh", "-c", WithProperties(sign_cmd)])) | |
| # Generate SHA 256 checksums for everything remaining | |
| sha256_cmd = "(cd " + final_dist_dir + " && for i in * ; do sha256sum $i > $i.sha256; done)" | |
| f.addStep(MasterShellCommand(name="checksumming", | |
| command=["sh", "-c", WithProperties(sha256_cmd)])) | |
| # Just log what we're about to upload for reference | |
| ls_cmd = "ls -l " + final_dist_dir | |
| f.addStep(MasterShellCommand(name="listing artifacts", | |
| command=["sh", "-c", WithProperties(ls_cmd)])) | |
| # Stable releases need a manual upload | |
| if upload: | |
| # Upload everything to S3, first to the archive | |
| s3dir = s3_addy + "/" + remote_dist_dir + "/%(archive_date)s/" | |
| s3cmd = "s3cmd put -P --no-progress " + final_dist_dir + "/* " + s3dir | |
| f.addStep(MasterShellCommand(name="s3 archive upload", | |
| command=["sh", "-c", WithProperties(s3cmd)])) | |
| # Then to the primary dist directory | |
| s3dir = s3_addy + "/" + remote_dist_dir + "/" | |
| s3cmd = "s3cmd put -P --no-progress " + final_dist_dir + "/* " + s3dir | |
| f.addStep(MasterShellCommand(name="s3 primary upload", | |
| command=["sh", "-c", s3cmd])) | |
# Delete the dist directory so the next run starts fresh
| f.addStep(MasterShellCommand(name="wipe dist dir", | |
| command=["sh", "-c", "rm -Rf " + local_dist_dir])) | |
| return f | |
| # Pulls the Rust and Cargo installers from s3 and combines them into the final | |
| # distributable | |
| def packaging_dist_buildfactory(platform, channel_label): | |
| f = BuildFactory() | |
| f.addStep(Git(repourl=packaging_source, | |
| progress=True, | |
| retry=(5, 2), | |
| retryFetch=True, | |
| submodules=True, | |
| workdir=WORKDIR)) | |
| f.addStep(ShellCommand(env=CommandEnv(), | |
| name="clean", | |
| description="clean", | |
| descriptionDone="cleaned", | |
| workdir=WORKDIR, | |
| command=["rm", "-Rf", "in", "out", "tmp"], | |
| haltOnFailure=True)) | |
| hosts = all_platform_hosts(platform) | |
| if platform == 'linux': | |
| if channel_label == 'stable': | |
| extra_hosts = stable_cross_targets | |
| elif channel_label == 'beta': | |
| extra_hosts = beta_cross_targets | |
| else: | |
| extra_hosts = nightly_cross_targets | |
| hosts += [t['t'] for t in extra_hosts if 'pkg' in t] | |
| for target in hosts: | |
| f.addStep(Compile(env=CommandEnv(), | |
| name="fetch inputs", | |
| description="fetch input", | |
| descriptionDone="fetched", | |
| workdir=WORKDIR, | |
| command=["python", "fetch-inputs.py", | |
| "--target=" + target, | |
| "--channel=" + channel_label])) | |
| opt_args = [] | |
| if "windows" in target: | |
| opt_args = ["--exe", "--msi", "--msi-sval"] | |
| elif "apple" in target: | |
| opt_args = ["--pkg"] | |
| f.addStep(Compile(env=CommandEnv(), | |
| name="package", | |
| description="package", | |
| descriptionDone="packaged", | |
| workdir=WORKDIR, | |
| command=["python", "package-rust.py", | |
| "--target=" + target, | |
| "--channel=" + channel_label] + opt_args)) | |
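|         # For illustration (triple and channel are examples), a Windows target is | |
|         # packaged with something like | |
|         #     python package-rust.py --target=x86_64-pc-windows-gnu \ | |
|         #         --channel=nightly --exe --msi --msi-sval | |
|         # while targets that are neither Windows nor Apple get no extra installer | |
|         # flags. | |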
| # Get the id to use to uniquely identify this revision. In the | |
| # other dist builds this is the commit sha. Here though we're | |
| # going to use the hash from the version file. | |
|     # Routing stderr to /dev/null works around a mysterious bug on | |
|     # Windows where cat thinks we are trying to pipe a file | |
|     # called '^'. | |
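|     # As a sketch of what this extracts (version string is illustrative): a | |
|     # version file containing "rustc 1.8.0-nightly (123abc789 2015-01-01)" | |
|     # yields the nine-character commit id "123abc789". | |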
| source_id = 'cat tmp/work/rustc-*/version 2> /dev/null | sed "s/^.*(\([a-z0-9][a-z0-9][a-z0-9][a-z0-9][a-z0-9][a-z0-9][a-z0-9][a-z0-9][a-z0-9]\).*/\\1/"' | |
| f.addStep(SetPropertyFromCommand(env=CommandEnv(), | |
| command=["sh", "-c", source_id], | |
| property="commit_id", | |
| workdir=WORKDIR)) | |
| # Location on master where we put the artifacts | |
| local_dist_dir = "tmp/dist/packaging-" + channel_label | |
| # Artifacts for *this* builder | |
| local_dist_platform_dir = local_dist_dir + "/" + platform | |
| # Remove any old local junk | |
| rm_dist_cmd = "rm -rf " + local_dist_platform_dir + "/*" | |
| f.addStep(MasterShellCommand(name="rm dist dir", | |
| command=["sh", "-c", rm_dist_cmd])) | |
| # Upload to master from slave | |
| f.addStep(DirectoryUpload(slavesrc="out", | |
| masterdest=local_dist_platform_dir, | |
| workdir=WORKDIR)) | |
|     # Write the commit id we just captured to the commit-id file | |
| commit_id_cmd = "echo '%(commit_id)s' > " + local_dist_platform_dir + "/commit-id" | |
| f.addStep(MasterShellCommand(name="stamp commit id", | |
| command=["sh", "-c", WithProperties(commit_id_cmd)])) | |
| f.addStep(DistSync(name="checking for synced packaging builds", | |
| stagingDir=local_dist_dir, | |
| platforms=packaging_platforms, | |
| nogate_platforms=dist_nogate_platforms, | |
| haltOnFailure=True, | |
| flunkOnFailure=False)) | |
| # Require a manual upload for the stable channel | |
| upload = channel_label != "stable" | |
| return finish_dist(f, local_dist_dir, dist_platforms, s3_addy, "dist", "rust", channel_label, upload) | |
| c['builders'] = [] | |
| def platform_slaves(p): | |
| # The android builder has one slave, with the same name | |
| if "-x-android" in p: | |
| return [p] | |
| if "musl" in p or "32cross" in p or "cross32" in p: | |
| p = "linux" | |
| elif "ios" in p: | |
| return [slave.slavename for slave in ios_slaves] | |
| elif "linux" in p and "cross" in p: | |
| p = "lincross" | |
| else: | |
| p = p.split("-")[0] | |
| return [slave.slavename | |
| for slave in c['slaves'] | |
| if p in slave.slavename and slave in auto_slaves] | |
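| # For illustration (platform names are assumptions): a musl or 32-bit-cross | |
| # platform is served by the plain "linux" slaves, a "linux...cross..." platform | |
| # by the "lincross" slaves, and e.g. "mac-64-opt" by any auto slave whose name | |
| # contains "mac". | |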
| def platform_snap_slaves(p): | |
| p = p.split("-")[0] | |
| return [slave.slavename | |
| for slave in snap_slaves | |
| if p in slave.slavename] | |
| # FIXME: The linux AMI instances are using valgrind 3.7 and we need 3.8+ | |
| # This rule limits which bots we run the valgrinding dist snapshot on. | |
| def platform_dist_slaves(p): | |
| if 'musl' in p or ('cross' in p and not 'win' in p) or 'ios' in p: | |
| return platform_slaves(p) | |
|     # p is exactly the platform name, e.g. arm-android | |
|     # dist_slaves is the list of slaves marked dist=true in slave_list.txt | |
| part = p.split("-")[0] | |
| return [slave.slavename | |
| for slave in dist_slaves | |
| if part in slave.slavename] | |
| def nextSlave(builder, available_slavebuilders): | |
|     if available_slavebuilders: | |
| s = sorted(available_slavebuilders, key=lambda s: s.slave.slavename) | |
| return s[0] | |
| return None | |
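| # Sorting makes slave assignment deterministic: with (illustrative) idle slaves | |
| # ["linux-2", "linux-1"], the build always lands on "linux-1". | |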
| for p in try_platforms: | |
| # Builder that does incremental-make-check-lite runs, | |
| # host=target only, no valgrind, no perf. | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="try-" + p, | |
| category="try", | |
| nextSlave=nextSlave, | |
| properties={"platform":p, | |
| "branch":"try", | |
| "valgrind": False, | |
| "docs": False, | |
| "check": "check-lite", | |
| "verbose": True}, | |
| slavenames=platform_slaves(p), | |
| factory=make_and_check_buildfactory(True, False, | |
| "win" in p, True))) | |
| for p in auto_platforms: | |
|     # Here we parse the platform name into the set of actions this builder performs | |
| opt_compiler = True | |
| opt_tests = True | |
| debug = False | |
| chk = True | |
| rustbuild = None | |
| orbit = None | |
| if "-debug" in p: | |
| debug = True | |
| chk = False | |
| if "-nopt-c" in p: | |
| opt_compiler = False | |
| if "-nopt-t" in p: | |
| opt_tests = False | |
| vg = False | |
| if "-vg" in p: | |
| vg = True | |
| musl = None | |
| targets = [] | |
| hosts = auto_platform_host(p) | |
| if "-all" in p: | |
| chk = "check-lite" | |
| if "bsd" in p: | |
| chk = "check-lite" | |
| if "musl" in p: | |
| chk = "check-stage2-T-x86_64-unknown-linux-musl-H-x86_64-unknown-linux-gnu" | |
| musl = "/musl-x86_64" | |
| targets.append('x86_64-unknown-linux-musl') | |
| if "cross" in p: | |
| chk = False | |
| musl = "/musl-i686" | |
| if not opt_compiler: | |
| chk = False | |
| if "rustbuild" in p: | |
| rustbuild = True | |
| if "mir" in p: | |
| orbit = True | |
| if "cargotest" in p: | |
| chk = "check-cargotest" | |
| rustbuild = True | |
| for t in nightly_cross_targets: | |
| if t['b']['auto'] in p: | |
| if 'host' in t: | |
| hosts.append(t['t']) | |
| rustbuild = True | |
| else: | |
| targets.append(t['t']) | |
| android = False | |
| if "-x-android" in p: | |
| targets.append('arm-linux-androideabi') | |
| targets.append('i686-linux-android') | |
| android = True | |
| # Not checking android for now | |
| chk = False | |
| if "-x-android-t" in p: | |
| # Only test android, not the host | |
| chk = "check-stage2-T-arm-linux-androideabi-H-x86_64-unknown-linux-gnu" | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="auto-" + p, | |
| category="auto", | |
| slavenames=platform_slaves(p), | |
| nextSlave=nextSlave, | |
| properties={"platform":p, | |
| "branch":"auto", | |
| "valgrind": vg, | |
| "optimize": opt_compiler, | |
| "optimize-tests": opt_tests, | |
| "android": android, | |
| "musl": musl, | |
| "build": auto_platform_build(p), | |
| "hosts": hosts, | |
| "targets": targets, | |
| "debug": debug, | |
| "debug-assertions": True, | |
| "check": chk, | |
| "orbit": orbit, | |
| "rustbuild": rustbuild}, | |
| factory=make_and_check_buildfactory(chk, android, | |
| "win" in p, True))) | |
| for p in snap_platforms: | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="snap3-" + p, | |
| category="util-snap3", | |
| slavenames=platform_snap_slaves(p), | |
| nextSlave=nextSlave, | |
| properties={"platform":p, | |
| "branch":"snap-stage3", | |
| "hosts": "all", | |
| "valgrind": False, | |
| "inject-std-version": False, | |
| "llvm-static-stdcpp": p == 'linux', | |
| "check": True}, | |
| factory=snap3_buildfactory(p))) | |
| for p in dist_platforms: | |
| hosts = "all" | |
| android = False | |
| musl = None | |
| targets = [] | |
| if "android" in p: | |
| android = True | |
|         # Android dist builds produce no host compilers, only the Android | |
|         # target artifacts | |
| hosts = [] | |
| targets.append('arm-linux-androideabi') | |
| targets.append('i686-linux-android') | |
| elif "musl" in p: | |
| musl = "/musl-x86_64" | |
| hosts = [] | |
| targets.append('x86_64-unknown-linux-musl') | |
| elif "cross32-linux" in p: | |
| musl = "/musl-i686" | |
| if "ios" in p or "cross" in p: | |
| hosts = [] | |
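|     # Worked example: for a platform name containing "musl" (e.g. | |
|     # "linux-musl-64", assumed) the code above leaves | |
|     #     hosts   = [] | |
|     #     targets = ['x86_64-unknown-linux-musl'] | |
|     #     musl    = "/musl-x86_64" | |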
| for channel in ['nightly', 'beta', 'stable']: | |
| my_targets = targets[:] | |
|         # Copy the host list so per-channel appends below don't leak into the | |
|         # next channel's builder. | |
|         my_hosts = hosts[:] | |
| rustbuild = None | |
| if channel == 'stable': | |
| my_cross_targets = stable_cross_targets | |
| elif channel == 'beta': | |
| my_cross_targets = beta_cross_targets | |
| elif channel == 'nightly': | |
| my_cross_targets = nightly_cross_targets | |
| for t in my_cross_targets: | |
|             if t['b']['dist'] not in p: | |
| continue | |
| if 'host' in p: | |
| rustbuild = True | |
| my_hosts.append(t['t']) | |
| else: | |
| my_targets.append(t['t']) | |
| # The `cross-linux` builder is intended for just producing standard | |
| # libraries, whereas the `cross-host-linux` is producing entire | |
| # compilers. Ideally these would use the same bot but unfortunately they | |
| # use different build systems, so they need to be separate for now. | |
| # | |
| # In any case, if we're producing an entire compiler for a target then | |
| # we don't want to *also* produce a standard library for that target on | |
| # another bot as it'll cause upload failures later. Consequently we just | |
|     # filter the list of `my_targets` to exclude everything we're | |
| # building a compiler for. | |
|         for t in my_cross_targets: | |
|             if 'host' in t and t['t'] in my_targets: | |
|                 my_targets.remove(t['t']) | |
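|         # Worked example (triple is illustrative): if a cross entry for | |
|         # "aarch64-unknown-linux-gnu" is marked 'host', the cross-host builder | |
|         # picks it up in my_hosts while the plain cross builder drops it from | |
|         # my_targets, so only one bot uploads artifacts for that triple. | |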
| branch = 'master' if channel == 'nightly' else channel | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name=channel + "-dist-rustc-" + p, | |
| category="util-dist", | |
| properties={"platform":p, | |
| "branch":branch, | |
| "hosts": my_hosts, | |
| "targets": my_targets, | |
| "build": auto_platform_build(p), | |
| "valgrind": False, | |
| "check": True, | |
| "android": android, | |
| "musl": musl, | |
| "llvm-static-stdcpp": 'linux' in p, | |
| "rustbuild": rustbuild, | |
| "release-channel": channel}, | |
| nextSlave=nextSlave, | |
| slavenames=platform_dist_slaves(p), | |
| factory=distsnap_buildfactory(p, channel))) | |
| for p in packaging_platforms: | |
| # Nightly packaging | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="nightly-dist-packaging-" + p, | |
| category="util-dist", | |
| properties={"platform": p, | |
| "branch": "master"}, | |
| nextSlave=nextSlave, | |
| slavenames=platform_dist_slaves(p), | |
| factory=packaging_dist_buildfactory(p, "nightly"))) | |
| # Beta packaging | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="beta-dist-packaging-" + p, | |
| category="util-dist", | |
| properties={"platform": p, | |
| "branch": "master"}, | |
| nextSlave=nextSlave, | |
| slavenames=platform_dist_slaves(p), | |
| factory=packaging_dist_buildfactory(p, "beta"))) | |
| # Stable packaging | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="stable-dist-packaging-" + p, | |
| category="util-dist", | |
| properties={"platform": p, | |
| "branch": "master"}, | |
| nextSlave=nextSlave, | |
| slavenames=platform_dist_slaves(p), | |
| factory=packaging_dist_buildfactory(p, "stable"))) | |
| # The nightly trigger | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="nightly-dist-rustc-trigger", | |
| category="util-dist", | |
| nextSlave=nextSlave, | |
| slavenames=[s.slavename for s in auto_slaves], # any slave; we just need to check out the source to get the current rev | |
| factory=rust_distsnap_trigger_buildfactory(["nightly-dist-rustc-sched"]))) | |
| # The beta trigger | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="beta-dist-rustc-trigger", | |
| category="util-dist", | |
| nextSlave=nextSlave, | |
| slavenames=[s.slavename for s in auto_slaves], # any slave; we just need to check out the source to get the current rev | |
| factory=rust_distsnap_trigger_buildfactory(["beta-dist-rustc-sched"]))) | |
| # The stable trigger | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="stable-dist-rustc-trigger", | |
| category="util-dist", | |
| nextSlave=nextSlave, | |
| slavenames=[s.slavename for s in auto_slaves], # any slave; we just need to check out the source to get the current rev | |
| factory=rust_distsnap_trigger_buildfactory(["stable-dist-rustc-sched"]))) | |
| # The cargo nightly trigger | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="nightly-dist-cargo-trigger", | |
| category="util-dist", | |
| nextSlave=nextSlave, | |
| slavenames=[s.slavename for s in auto_slaves], # any slave; we just need to check out the source to get the current rev | |
| factory=cargo_distsnap_trigger_buildfactory(["nightly-dist-cargo-sched"]))) | |
| for platform in cargo_platforms: | |
| p = platform.split("-")[0] | |
| bits = "32" if "32" in platform else "64" | |
| triple = auto_platform_triple(platform) | |
| slave_match = p | |
| targets = [triple] | |
| nightly = None | |
| if "cross" in platform: | |
| nightly = True | |
| targets = cargo_cross_targets[:] | |
| slave_match = 'lincross' | |
| slaves = [slave.slavename | |
| for slave in c['slaves'] | |
| if slave_match in slave.slavename and slave in auto_slaves] | |
|     # Cargo CI builder for the auto-cargo branch: host=target only, | |
|     # no valgrind, no perf. | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="cargo-" + platform, | |
| category="cargo", | |
| nextSlave=nextSlave, | |
| properties={"platform":p, | |
| "branch":"auto-cargo", | |
| "verbose": True, | |
| "cargo": True, | |
| "optimize": False, | |
| "nightly": nightly, | |
| "bits": bits, | |
| "targets": targets, | |
| "build": triple}, | |
| slavenames=slaves, | |
| factory=cargo_buildfactory(platform))) | |
| if platform not in cargo_dist_platforms: | |
| continue | |
| # Cargo nightly | |
| c['builders'].append(BuilderConfig( | |
| mergeRequests=True, | |
| name="nightly-dist-cargo-" + platform, | |
| category="util-dist", | |
| properties={"platform":p, | |
| "branch":"master", | |
| "targets": targets, | |
| "check": True, | |
| "wipe": True, | |
| "cargo": True, | |
| "cargo-nightly": True, | |
| "optimize": True, | |
| "bits": bits, | |
| "nightly": True}, | |
| nextSlave=nextSlave, | |
| slavenames=platform_dist_slaves(platform), | |
| factory=cargo_nightly_buildfactory(platform, targets))) | |
| ####### STATUS TARGETS | |
| # 'status' is a list of Status Targets. The results of each build will be | |
| # pushed to these targets. buildbot/status/*.py has a variety to choose from, | |
| # including web pages, email senders, and IRC bots. | |
| c['status'] = [] | |
| from buildbot.status import html | |
| from buildbot.status.web import authz, auth | |
| # Import user credentials: one user that may force any build (any-build), | |
| # and another that is not allowed to force dist builds (no-dist). | |
| import passwords | |
| reload(passwords) | |
| def userAuth(username, bs): | |
| if username == "any-build": | |
| return True | |
| if (hasattr(bs, 'getName') and "dist" in bs.getName()) or (hasattr(bs, 'getBuilder') and "dist" in bs.getBuilder().getName()): | |
| return False | |
| return True | |
| def userAllAuth(username): | |
| return username == "any-build" | |
| authz_cfg=authz.Authz( | |
| auth=auth.BasicAuth(passwords.users), | |
| gracefulShutdown = 'auth', | |
| forceBuild = userAuth, | |
| forceAllBuilds = userAllAuth, | |
| pingBuilder = userAuth, | |
| stopBuild = userAuth, | |
| stopAllBuilds = userAllAuth, | |
| cancelPendingBuild = userAuth, | |
| showUsersPage = 'auth' | |
| ) | |
| #c['status'].append( | |
| # mail.MailNotifier(fromaddr="buildbot@rust-lang.org", | |
| # builders=[], | |
| # relayhost="smtp.example.org")) | |
| c['status'].append(html.WebStatus( | |
| revlink='http://github.com/rust-lang/rust/commit/%s', | |
| order_console_by_time=True, | |
| http_port="tcp:8010:interface=127.0.0.1", | |
| authz=authz_cfg)) | |
| from buildbot.status import words | |
| c['status'].append(words.IRC(host="irc.mozilla.org", | |
| port=6697, | |
| useSSL=True, | |
| nick="rust-buildbot", | |
| channels=["#rust-bots"], | |
| notify_events={ | |
| 'exception':1, | |
| 'finished':1, | |
| 'success':1, | |
| 'failure':1 | |
| })) | |
| # Status reporter that kills off builds that become redundant once a | |
| # particular build has failed. We use this to kill the associated 'auto' | |
| # jobs when one of them fails. | |
| # | |
| # thanks to Brendan Cully <brendan@kublai.com> | |
| from twisted.internet import defer | |
| from twisted.python import log | |
| from buildbot.status.base import StatusReceiverMultiService | |
| from buildbot.status import results | |
| from buildbot import interfaces | |
| def samesource(ssa, ssb): | |
| if ssa.ssid and ssa.ssid == ssb.ssid: | |
| return True | |
| if ssa.branch == ssb.branch and ssa.revision == ssb.revision \ | |
| and ssa.project == ssb.project and ssa.repository == ssb.repository \ | |
| and ssa.patch == ssb.patch: | |
| return True | |
| return False | |
| class BSKiller(StatusReceiverMultiService): | |
| def __init__(self, buildermatch=lambda _: True): | |
| StatusReceiverMultiService.__init__(self) | |
| self.buildermatch = buildermatch | |
| self.builders = [] | |
| self.ctl = None | |
| def startService(self): | |
| StatusReceiverMultiService.startService(self) | |
| self.parent.getStatus().subscribe(self) | |
| self.ctl = interfaces.IControl(self.master) | |
|     def builderAdded(self, name, builder): | |
|         """choose to subscribe to the given builder""" | |
|         self.builders.append(builder) | |
|         if not self.buildermatch(name): | |
|             return False | |
|         return self | |
| def buildFinished(self, buildername, build, result): | |
| if result in [results.FAILURE]: | |
| return self.killallbuilds(build.getSourceStamps()) | |
| def stepFinished(self, build, step, results): | |
| builderctl = self.ctl.getBuilder(build.getBuilder().getName()) | |
| builderctl.getBuild(build.getNumber()).stopBuild("no point in continuing") | |
| @defer.inlineCallbacks | |
| def killallbuilds(self, sourcestamps): | |
| pending = [] | |
| for builder in self.builders: | |
| checkpending = True | |
| builderctl = self.ctl.getBuilder(builder.getName()) | |
| for build in builder.getCurrentBuilds(): | |
| if build.isFinished(): | |
| continue | |
| log.msg('considering build %s:%d' % (builder.getName(), | |
| build.getNumber())) | |
| if not any([samesource(ss1, ss2) | |
| for ss1 in sourcestamps | |
| for ss2 in build.getSourceStamps()]): | |
| log.msg('mismatched sourcestamps') | |
| continue | |
| log.msg('sourcestamps match') | |
| checkpending = False | |
| bldctl = builderctl.getBuild(build.getNumber()) | |
| if build.currentStep != None and ("git" in build.currentStep.getName() | |
| or "configure" in build.currentStep.getName()): | |
| build.subscribe(self) | |
| else: | |
| bldctl.stopBuild("no point in continuing") | |
| if checkpending: | |
| res = yield builderctl.getPendingBuildRequestControls() | |
| brctls = dict((brc.brid, brc) for brc in res) | |
| brs = yield builder.getPendingBuildRequestStatuses() | |
| for br in brs: | |
| ss = yield br.getSourceStamps() | |
| if any([samesource(ss1, ss2) for ss1 in sourcestamps | |
| for ss2 in ss]): | |
| log.msg('cancelling pending build on ' + builder.getName()) | |
| brctl = brctls[br.brid] | |
| brctl.cancel() | |
| c['status'].append(BSKiller(lambda buildername: buildername.startswith('auto') and buildername not in nogate_builders)) | |
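| # With this matcher (builder name illustrative), a failure on "auto-linux-64-opt" | |
| # stops the other in-flight "auto-*" builds and cancels pending ones for the | |
| # same source stamp, while builders in nogate_builders are left alone. | |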
| from buildbot.status.status_push import HttpStatusPush | |
| if env == "prod": | |
| push_url = "http://buildbot.rust-lang.org/homu/buildbot" | |
| else: | |
| push_url = "http://54.176.156.253/homu/buildbot" | |
| c['status'].append(HttpStatusPush( | |
| serverUrl=push_url, | |
| extra_post_params={'secret': master_config['homu_secret']}, | |
| )) | |
| ####### PROJECT IDENTITY | |
| # the 'title' string will appear at the top of this buildbot | |
| # installation's html.WebStatus home page (linked to the | |
| # 'titleURL') and is embedded in the title of the waterfall HTML page. | |
| c['title'] = "Rust" | |
| c['titleURL'] = "http://rust-lang.org" | |
| # the 'buildbotURL' string should point to the location where the buildbot's | |
| # internal web server (usually the html.WebStatus page) is visible. This | |
| # typically uses the port number set in the Waterfall 'status' entry, but | |
| # with an externally-visible host name which the buildbot cannot figure out | |
| # without some help. | |
| c['buildbotURL'] = "http://buildbot.rust-lang.org/" | |
| ####### DB URL | |
| # This specifies what database buildbot uses to store change and scheduler | |
| # state. You can leave this at its default for all but the largest | |
| # installations. | |
| c['db_url'] = "sqlite:///state.sqlite" | |