diff --git a/benchtester/BatchTester.py b/benchtester/BatchTester.py
index 02a856d..5473d46 100644
--- a/benchtester/BatchTester.py
+++ b/benchtester/BatchTester.py
@@ -28,148 +28,162 @@ is_win = platform.system() == "Windows"

 ##
-## Utility
+# Utility
 ##
+

 def parse_nightly_time(string):
-  string = string.split('-')
-  if (len(string) != 3):
-    raise Exception("Could not parse %s as a YYYY-MM-DD date")
-  return datetime.date(int(string[0]), int(string[1]), int(string[2]))
+    parts = string.split('-')
+    if len(parts) != 3:
+        raise Exception("Could not parse %s as a YYYY-MM-DD date" % string)
+    return datetime.date(int(parts[0]), int(parts[1]), int(parts[2]))


 # Grab the first file (alphanumerically) from the batch folder,
 # delete it and return its contents
+
+
 def get_queued_job(dirname):
-  batchfiles = os.listdir(dirname)
-  if len(batchfiles):
-    bname = os.path.join(dirname, sorted(batchfiles)[0])
-    try:
-      bfile = open(bname, 'r')
-      bcmd = json.load(bfile)
-    finally:
-      if bfile: bfile.close()
-    os.remove(bname)
-    return bcmd
-  return False
+    batchfiles = os.listdir(dirname)
+    if len(batchfiles):
+        bname = os.path.join(dirname, sorted(batchfiles)[0])
+        bfile = None
+        try:
+            bfile = open(bname, 'r')
+            bcmd = json.load(bfile)
+        finally:
+            if bfile:
+                bfile.close()
+        os.remove(bname)
+        return bcmd
+    return False


 # Given a 'hook', which is a path to a python file,
 # imports it as a module and returns the handle. A bit hacky.
+
+
 def _get_hook(filename):
-  hookname = os.path.basename(filename)
-  # Strip .py and complain if it has other periods. (I said hacky!)
-  if hookname[-3:].lower() == '.py':
-    hookname = hookname[:-3]
-  if hookname.find('.') != -1:
-    raise Exception("Hook filename cannot contain periods (other than .py (it is imported as a module interally))")
-
-  # Add dir containing hook to path temporarily
-  sys.path.append(os.path.abspath(os.path.dirname(filename)))
-  try:
-    ret = __import__(hookname)
-  finally:
-    sys.path = sys.path[:-1]
-  return ret
+    hookname = os.path.basename(filename)
+    # Strip .py and complain if it has other periods. (I said hacky!)
+    if hookname[-3:].lower() == '.py':
+        hookname = hookname[:-3]
+    if hookname.find('.') != -1:
+        raise Exception(
+            "Hook filename cannot contain periods (other than .py (it is "
+            "imported as a module internally))")
+
+    # Add dir containing hook to path temporarily
+    sys.path.append(os.path.abspath(os.path.dirname(filename)))
+    try:
+        ret = __import__(hookname)
+    finally:
+        sys.path = sys.path[:-1]
+    return ret
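The hook contract (should_test / run_tests / cli_hook) is only spelled out in the --hook option text further down; for orientation, a minimal hook module might look like the sketch below. The three entry-point names are what BatchTester calls; everything inside them is an illustrative assumption, not part of this patch.

# hook_example.py -- illustrative sketch only
def cli_hook(parser):
    # Receives the argparse parser so a hook can register extra options
    parser.add_argument('--example-flag', action='store_true')

def should_test(build, args):
    # Return False to send the build to the 'skipped' list; hooks are
    # expected to honor build.force (see BatchBuild below)
    return True

def run_tests(build, args):
    # Runs on a worker process once the build is prepared; raising an
    # exception marks the build as failed
    pass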

 # BatchBuild wraps BuildGetter.build with various info, and provides
 # a .(de)serialize for the status.json file output
-class BatchBuild():
-  def __init__(self, build, revision):
-    # BuildGetter compilebuild object
-    self.build = build
-    # Revision, may be full hash or partial
-    self.revision = revision
-    # Build / task number. These are sequential, but reset to zero occasionally
-    # no two *concurrently running* builds will share a number, so it can be
-    # used for e.g. non-colliding VNC display #s
-    self.num = None
-    # The pool task associated with this build
-    self.task = None
-    # Textual note for this build, shows up in logs and serialized Build objects.
-    # used by AWSY's /status/ page, for instance
-    self.note = None
-    # Place in a custom series
-    self.series = None
-    # Timestamp of when the build began testing
-    self.started = None
-    # unique identifier per session, totally unique unlike .num. TODO should
-    # probably be given to __init__ instead of set manually later...
-    self.uid = -1
-    # Timestamp of when this build was 'finished' (failed or otherwise)
-    self.finished = None
-    # If true, retest the build even if its already queued. --hook scripts should
-    # honor this in should_test as well
-    self.force = None
-
-  def build_type(self):
-    if isinstance(self.build, BuildGetter.CompileBuild):
-      return 'compile'
-    elif isinstance(self.build, BuildGetter.TryBuild):
-      return 'try'
-    elif isinstance(self.build, BuildGetter.FTPBuild):
-      return 'ftp'
-    elif isinstance(self.build, BuildGetter.TinderboxBuild):
-      return 'tinderbox'
-    elif isinstance(self.build, BuildGetter.NightlyBuild):
-      return 'nightly'
-    else:
-      raise Exception("Unknown build type %s" % (self.build,))
-
-  @staticmethod
-  def deserialize(buildobj, args):
-    if buildobj['type'] == 'compile':
-      # See https://github.com/mozilla/areweslimyet/issues/47
-      raise Exception("Build type 'compile' is not currently supported")
-    elif buildobj['type'] == 'tinderbox':
-      build = BuildGetter.TinderboxBuild(buildobj['timestamp'], buildobj['branch'])
-    elif buildobj['type'] == 'nightly':
-      build = BuildGetter.NightlyBuild(parse_nightly_time(buildobj['for']))
-    elif buildobj['type'] == 'ftp':
-      build = BuildGetter.FTPBuild(buildobj['path'])
-    elif buildobj['type'] == 'try':
-      build = BuildGetter.TryBuild(buildobj['changeset'])
-    else:
-      raise Exception("Unkown build type %s" % buildobj['type'])
-
-    ret = BatchBuild(build, buildobj['revision'])
-    ret.series = buildobj['series']
-    ret.uid = buildobj['uid']
-    ret.timestamp = buildobj['timestamp']
-    ret.note = buildobj['note']
-    ret.started = buildobj['started']
-    ret.finished = buildobj['finished']
-    ret.force = buildobj['force']
-    return ret
-  def serialize(self):
-    ret = {
-      'timestamp' : self.build.get_buildtime(),
-      'revision' : self.revision,
-      'note' : self.note,
-      'started' : self.started,
-      'finished' : self.finished,
-      'force' : self.force,
-      'uid' : self.uid,
-      'series': self.series
-    }
-
-    build_type = self.build_type()
-    ret['type'] = build_type
-
-    if build_type == 'try':
-      ret['changeset'] = self.build._changeset
-    elif build_type == 'ftp':
-      ret['path'] = self.build._path
-    elif build_type == 'tinderbox':
-      # When deserializing we need to look this up by it's tinderbox timestamp,
-      # even if we use the push timestamp internally
-      ret['timestamp'] = self.build.get_tinderbox_timestamp()
-      ret['branch'] = self.build.get_branch()
-    elif build_type == 'nightly':
-      # Date of nightly might not correspond to build timestamp
-      ret['for'] = '%u-%u-%u' % (self.build._date.year, self.build._date.month, self.build._date.day)
-
-    return ret
+class BatchBuild():
+
+    def __init__(self, build, revision):
+        # BuildGetter compilebuild object
+        self.build = build
+        # Revision, may be full hash or partial
+        self.revision = revision
+        # Build / task number. These are sequential, but reset to zero occasionally;
+        # no two *concurrently running* builds will share a number, so it can be
+        # used for e.g. non-colliding VNC display #s
+        self.num = None
+        # The pool task associated with this build
+        self.task = None
+        # Textual note for this build, shows up in logs and serialized Build objects.
+        # used by AWSY's /status/ page, for instance
+        self.note = None
+        # Place in a custom series
+        self.series = None
+        # Timestamp of when the build began testing
+        self.started = None
+        # unique identifier per session, totally unique unlike .num. TODO should
+        # probably be given to __init__ instead of set manually later...
+        self.uid = -1
+        # Timestamp of when this build was 'finished' (failed or otherwise)
+        self.finished = None
+        # If true, retest the build even if its already queued. --hook scripts should
+        # honor this in should_test as well
+        self.force = None
+
+    def build_type(self):
+        if isinstance(self.build, BuildGetter.CompileBuild):
+            return 'compile'
+        elif isinstance(self.build, BuildGetter.TryBuild):
+            return 'try'
+        elif isinstance(self.build, BuildGetter.FTPBuild):
+            return 'ftp'
+        elif isinstance(self.build, BuildGetter.TinderboxBuild):
+            return 'tinderbox'
+        elif isinstance(self.build, BuildGetter.NightlyBuild):
+            return 'nightly'
+        else:
+            raise Exception("Unknown build type %s" % (self.build,))
+
+    @staticmethod
+    def deserialize(buildobj, args):
+        if buildobj['type'] == 'compile':
+            # See https://github.com/mozilla/areweslimyet/issues/47
+            raise Exception("Build type 'compile' is not currently supported")
+        elif buildobj['type'] == 'tinderbox':
+            build = BuildGetter.TinderboxBuild(
+                buildobj['timestamp'], buildobj['branch'])
+        elif buildobj['type'] == 'nightly':
+            build = BuildGetter.NightlyBuild(
+                parse_nightly_time(buildobj['for']))
+        elif buildobj['type'] == 'ftp':
+            build = BuildGetter.FTPBuild(buildobj['path'])
+        elif buildobj['type'] == 'try':
+            build = BuildGetter.TryBuild(buildobj['changeset'])
+        else:
+            raise Exception("Unknown build type %s" % buildobj['type'])
+
+        ret = BatchBuild(build, buildobj['revision'])
+        ret.series = buildobj['series']
+        ret.uid = buildobj['uid']
+        ret.timestamp = buildobj['timestamp']
+        ret.note = buildobj['note']
+        ret.started = buildobj['started']
+        ret.finished = buildobj['finished']
+        ret.force = buildobj['force']
+
+        return ret
+
+    def serialize(self):
+        ret = {
+            'timestamp': self.build.get_buildtime(),
+            'revision': self.revision,
+            'note': self.note,
+            'started': self.started,
+            'finished': self.finished,
+            'force': self.force,
+            'uid': self.uid,
+            'series': self.series
+        }
+
+        build_type = self.build_type()
+        ret['type'] = build_type
+
+        if build_type == 'try':
+            ret['changeset'] = self.build._changeset
+        elif build_type == 'ftp':
+            ret['path'] = self.build._path
+        elif build_type == 'tinderbox':
+            # When deserializing we need to look this up by its tinderbox timestamp,
+            # even if we use the push timestamp internally
+            ret['timestamp'] = self.build.get_tinderbox_timestamp()
+            ret['branch'] = self.build.get_branch()
+        elif build_type == 'nightly':
+            # Date of nightly might not correspond to build timestamp
+            ret['for'] = '%u-%u-%u' % (self.build._date.year,
+                                       self.build._date.month, self.build._date.day)
+
+        return ret
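As a usage sketch, serialize() and deserialize() round-trip a build through the JSON status-file format; the changeset value and args dict below are invented for illustration:

build = BatchBuild(BuildGetter.TryBuild('0123456789ab'), '0123456789ab')
blob = json.dumps(build.serialize())   # -> {"type": "try", "changeset": ..., ...}
same = BatchBuild.deserialize(json.loads(blob), args)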

 # Work around multiprocessing.Pool() quirkiness. We can't give it
@@ -177,460 +191,544 @@ def serialize(self):
 # directly because the pool pickles it at a later date and causes thread issues
 # (but just forcing it to pickle explicitly is fine as it would be pickled
 # eventually either way)
+
+
 def _pool_batchtest_build(build, args):
-  return BatchTest.test_build(pickle.loads(build), args)
+    return BatchTest.test_build(pickle.loads(build), args)


 ##
-## BatchTest - a threaded test object. Given a list of builds, prepares them
-##            and tests them in parallel. In 'batch' mode, processes sets of
-##            arguments from a batch folder, and adds them to its queue, never
-##            exiting. (daemon mode might be better?)
-##            See BatchTestCLI for documentation on options
+# BatchTest - a threaded test object. Given a list of builds, prepares them
+# and tests them in parallel. In 'batch' mode, processes sets of
+# arguments from a batch folder, and adds them to its queue, never
+# exiting. (daemon mode might be better?)
+# See BatchTestCLI for documentation on options
 ##
+

 class BatchTest(object):
-  def __init__(self, args, out=sys.stdout):
-    # See BatchTestCLI for args documentation, for the time being
-    self.args = args
-    self.logfile = None
-    self.out = out
-    self.starttime = time.time()
-    self.buildindex = 0
-    self.pool = None
-    self.processed = 0
-    self.tick = 0
-    self.builds = {
-      'building' : None,
-      'prepared': [],
-      'running': [],
-      'pending': [],
-      'skipped': [],
-      'completed': [],
-      'failed': []
-    }
-    self.processedbatches = []
-    self.pendingbatches = []
-
-    if (self.args.get('hook')):
-      sys.path.append(os.path.abspath(os.path.dirname(self.args.get('hook'))))
-      self.hook = os.path.basename(self.args.get('hook'))
-    else:
-      self.hook = None
-
-    self.builder = None
-    self.builder_mode = None
-    self.builder_batch = None
-    self.manager = multiprocessing.Manager()
-    self.builder_result = self.manager.dict({ 'result': 'not started', 'ret' : None })
-
-  def stat(self, msg=""):
-    msg = "%s :: %s\n" % (time.ctime(), msg)
-    if self.out:
-      self.out.write("[BatchTester] %s" % msg)
-    if self.logfile:
-      self.logfile.write(msg)
-      self.logfile.flush()
-
-  #
-  # Resets worker pool
-  def reset_pool(self):
-    if self.pool:
-      self.pool.close()
-      self.pool.join()
-    self.buildindex = 0
-    self.pool = multiprocessing.Pool(processes=self.args['processes'], maxtasksperchild=1)
-
-  #
-  # Writes/updates the status file
-  def write_status(self):
-    statfile = self.args.get('status_file')
-    if not statfile: return
-    status = {
-      'starttime' : self.starttime,
-      'building': self.builds['building'].serialize() if self.builds['building'] else None,
-      'batches' : self.processedbatches,
-      'pendingbatches' : self.pendingbatches
-    }
-    for x in self.builds:
-      if type(self.builds[x]) == list:
-        status[x] = map(lambda y: y.serialize(), self.builds[x])
-
-    tempfile = os.path.join(os.path.dirname(statfile), ".%s" % os.path.basename(statfile))
-    sf = open(tempfile, 'w')
-    json.dump(status, sf, indent=2)
-    if is_win:
-      os.remove(statfile) # Can't do atomic renames on windows
-    os.rename(tempfile, statfile)
-    sf.close()
-
-  # Builds that are in the pending/running list already
-  def build_is_queued(self, build):
-    for x in ( self.builds['running'], self.builds['pending'], self.builds['prepared'], [ self.builds['building'] ]):
-      for y in x:
-        if y and y.revision == build.revision:
-          return True
-    return False
-  # Given a set of arguments, lookup & add all specified builds to our queue.
-  # This happens asyncrhonously, so not all builds may be queued immediately
-  def add_batch(self, batchargs):
-    self.pendingbatches.append({ 'args' : batchargs, 'note' : None, 'requested' : time.time(), 'uid': self.processed })
-    self.processed += 1
-
-  # Checks on the builder subprocess, getting its result, starting it if needed,
-  # etc
-  def check_builder(self):
-    # Did it exit?
-    if self.builder and not self.builder.is_alive():
-      self.builder.join()
-      self.builder = None
-
-      # Finished a batch queue job
-      if self.builder_mode == 'batch':
-        if self.builder_result['result'] == 'success':
-          queued = self.queue_builds(self.builder_result['ret'][0], prepend=self.builder_batch['args'].get('prioritize'))
-          already_queued = len(self.builder_result['ret'][0]) - len(queued)
-          self.queue_builds(self.builder_result['ret'][1], target='skipped', prepend=self.builder_batch['args'].get('prioritize'))
-          self.builder_batch['note'] = "Queued %u builds, skipped %u" % (len(queued), already_queued + len(self.builder_result['ret'][1]))
-        else:
-          self.builder_batch['note'] = self.builder_result['ret']
-        self.stat("Batch completed: %s (%s)" % (self.builder_batch['args'], self.builder_batch['note']))
-        self.builder_batch = None
-
-      # Finished a build job
-      elif self.builder_mode == 'build':
-        build = self.builds['building']
-        self.stat("Test %u prepared" % (build.num,))
-        self.builds['building'] = None
-        if self.builder_result['result'] == 'success':
-          self.builds['prepared'].append(self.builder_result['ret'])
-        else:
-          build.note = "Build setup failed - see log"
-          build.finished = time.time()
-          self.builds['failed'].append(build)
-      self.builder_result['result'] = 'uninitialied'
-      self.builder_result['ret'] = None
-      self.builder_mode = None
-
-    # Should it run?
-    if not self.builder and len(self.pendingbatches):
-      self.builder_mode = 'batch'
-      self.builder_batch = self.pendingbatches.pop()
-      self.stat("Handling batch %s" % (self.builder_batch,))
-      self.builder_batch['processed'] = time.time()
-      self.processedbatches.append(self.builder_batch)
-      self.builder_batch['note'] = "Processing - Looking up builds"
-      self.builder = multiprocessing.Process(target=self._process_batch, args=(self.args, self.builder_batch['args'], self.builder_result, self.hook))
-      self.builder.start()
-    elif not self.builder and self.builds['building']:
-      self.builder_mode = 'build'
-      self.stat("Starting build for %s :: %s" % (self.builds['building'].num, self.builds['building'].serialize()))
-      self.builder = multiprocessing.Process(target=self.prepare_build, args=(self.builds['building'], self.builder_result))
-      self.builder.start()
-
-  @staticmethod
-  def prepare_build(build, result):
-    if build.build.prepare():
-      result['result'] = 'success'
-    else:
-      result['result'] = 'failed'
-
-    result['ret'] = build
+
+    def __init__(self, args, out=sys.stdout):
+        # See BatchTestCLI for args documentation, for the time being
+        self.args = args
+        self.logfile = None
+        self.out = out
+        self.starttime = time.time()
+        self.buildindex = 0
+        self.pool = None
+        self.processed = 0
+        self.tick = 0
+        self.builds = {
+            'building': None,
+            'prepared': [],
+            'running': [],
+            'pending': [],
+            'skipped': [],
+            'completed': [],
+            'failed': []
+        }
+        self.processedbatches = []
+        self.pendingbatches = []
+
+        if (self.args.get('hook')):
+            sys.path.append(os.path.abspath(
+                os.path.dirname(self.args.get('hook'))))
+            self.hook = os.path.basename(self.args.get('hook'))
+        else:
+            self.hook = None
+
+        self.builder = None
+        self.builder_mode = None
+        self.builder_batch = None
+        self.manager = multiprocessing.Manager()
+        self.builder_result = self.manager.dict(
+            {'result': 'not started', 'ret': None})
+
+    def stat(self, msg=""):
+        msg = "%s :: %s\n" % (time.ctime(), msg)
+        if self.out:
+            self.out.write("[BatchTester] %s" % msg)
+        if self.logfile:
+            self.logfile.write(msg)
+            self.logfile.flush()
+
+    #
+    # Resets worker pool
+    def reset_pool(self):
+        if self.pool:
+            self.pool.close()
+            self.pool.join()
+        self.buildindex = 0
+        self.pool = multiprocessing.Pool(
+            processes=self.args['processes'], maxtasksperchild=1)
+
+    #
+    # Writes/updates the status file
+    def write_status(self):
+        statfile = self.args.get('status_file')
+        if not statfile:
+            return
+        status = {
+            'starttime': self.starttime,
+            'building': self.builds['building'].serialize() if self.builds['building'] else None,
+            'batches': self.processedbatches,
+            'pendingbatches': self.pendingbatches
+        }
+        for x in self.builds:
+            if type(self.builds[x]) == list:
+                status[x] = map(lambda y: y.serialize(), self.builds[x])
+
+        tempfile = os.path.join(os.path.dirname(
+            statfile), ".%s" % os.path.basename(statfile))
+        sf = open(tempfile, 'w')
+        json.dump(status, sf, indent=2)
+        sf.close()
+        if is_win and os.path.exists(statfile):
+            os.remove(statfile)  # Can't do atomic renames on windows
+        os.rename(tempfile, statfile)
+
+    # Builds that are in the pending/running list already
+    def build_is_queued(self, build):
+        for x in (self.builds['running'], self.builds['pending'],
+                  self.builds['prepared'], [self.builds['building']]):
+            for y in x:
+                if y and y.revision == build.revision:
+                    return True
+        return False
+    # Given a set of arguments, lookup & add all specified builds to our queue.
+    # This happens asynchronously, so not all builds may be queued immediately
+
+    def add_batch(self, batchargs):
+        self.pendingbatches.append(
+            {'args': batchargs, 'note': None, 'requested': time.time(), 'uid': self.processed})
+        self.processed += 1
+
+    # Checks on the builder subprocess, getting its result, starting it if needed,
+    # etc
+    def check_builder(self):
+        # Did it exit?
+        if self.builder and not self.builder.is_alive():
+            self.builder.join()
+            self.builder = None
+
+            # Finished a batch queue job
+            if self.builder_mode == 'batch':
+                if self.builder_result['result'] == 'success':
+                    queued = self.queue_builds(
+                        self.builder_result['ret'][0],
+                        prepend=self.builder_batch['args'].get('prioritize'))
+                    already_queued = len(self.builder_result['ret'][0]) - len(queued)
+                    self.queue_builds(
+                        self.builder_result['ret'][1],
+                        target='skipped',
+                        prepend=self.builder_batch['args'].get('prioritize'))
+                    self.builder_batch['note'] = "Queued %u builds, skipped %u" % (
+                        len(queued), already_queued + len(self.builder_result['ret'][1]))
+                else:
+                    self.builder_batch['note'] = self.builder_result['ret']
+                self.stat("Batch completed: %s (%s)" % (
+                    self.builder_batch['args'], self.builder_batch['note']))
+                self.builder_batch = None
+
+            # Finished a build job
+            elif self.builder_mode == 'build':
+                build = self.builds['building']
+                self.stat("Test %u prepared" % (build.num,))
+                self.builds['building'] = None
+                if self.builder_result['result'] == 'success':
+                    self.builds['prepared'].append(self.builder_result['ret'])
+                else:
+                    build.note = "Build setup failed - see log"
+                    build.finished = time.time()
+                    self.builds['failed'].append(build)
+            self.builder_result['result'] = 'uninitialized'
+            self.builder_result['ret'] = None
+            self.builder_mode = None
+
+        # Should it run?
+        if not self.builder and len(self.pendingbatches):
+            self.builder_mode = 'batch'
+            self.builder_batch = self.pendingbatches.pop()
+            self.stat("Handling batch %s" % (self.builder_batch,))
+            self.builder_batch['processed'] = time.time()
+            self.processedbatches.append(self.builder_batch)
+            self.builder_batch['note'] = "Processing - Looking up builds"
+            self.builder = multiprocessing.Process(target=self._process_batch, args=(
+                self.args, self.builder_batch['args'], self.builder_result, self.hook))
+            self.builder.start()
+        elif not self.builder and self.builds['building']:
+            self.builder_mode = 'build'
+            self.stat("Starting build for %s :: %s" % (
+                self.builds['building'].num, self.builds['building'].serialize()))
+            self.builder = multiprocessing.Process(target=self.prepare_build, args=(
+                self.builds['building'], self.builder_result))
+            self.builder.start()
+
+    @staticmethod
+    def prepare_build(build, result):
+        if build.build.prepare():
+            result['result'] = 'success'
+        else:
+            result['result'] = 'failed'
+
+        result['ret'] = build
-  # Add builds to self.builds[target], giving them a uid. Redirect builds from
-  # pending -> skipped if they're already queued
-  def queue_builds(self, builds, target='pending', prepend=False):
-    skip = []
-    ready = []
-    for x in builds:
-      if not x.force and target == 'pending' and self.build_is_queued(x):
-        x.finished = time.time()
-        skip.append(x)
-        x.note = "A build with this revision is already in queue"
-      else:
-        ready.append(x)
-      x.uid = self.processed
-      self.processed += 1
-    if len(skip):
-      self.builds['skipped'].extend(skip)
-    if (prepend):
-      self.builds[target] = ready + self.builds[target]
-    else:
-      self.builds[target].extend(ready)
-    return ready
-
-  #
-  # Run loop
-  #
-  def run(self):
-    if not self.args.get('repo'):
-      raise Exception('--repo is required for resolving full commit IDs (even on non-compile builds)')
-
-    statfile = self.args.get("status_file")
-
-    if self.args.get('logdir'):
-      self.logfile = open(os.path.join(self.args.get('logdir'), 'tester.log'), 'a')
-
-    self.stat("Starting at %s with args \"%s\"" % (time.ctime(), sys.argv))
-
-    self.reset_pool()
-
-    batchmode = self.args.get('batch')
-    if batchmode:
-      if statfile and os.path.exists(statfile) and self.args.get('status_resume'):
-        sf = open(statfile, 'r')
-        ostat = json.load(sf)
-        sf.close()
-        # Try to recover builds in order they were going to be processed
-        recover_builds = ostat['running']
-        recover_builds.extend(ostat['prepared'])
-        if ostat['building']: recover_builds.append(ostat['building'])
-        recover_builds.extend(ostat['pending'])
-
-        if len(recover_builds):
-          # Create a dummy batch, process it on main thread, move it to completed.
-          # this all happens before the helper thread starts so there are no other
-          # batches to contend with
-          self.add_batch("< Tester Restarted : Resuming any interrupted builds >")
-          resumebatch = self.pendingbatches.pop()
-          self.processedbatches.append(resumebatch)
-          resumebatch['processed'] = time.time()
-          self.write_status()
-          self.queue_builds(map(lambda x: BatchBuild.deserialize(x, self.args), recover_builds))
-          resumebatch['note'] = "Recovered %u builds (%u skipped)" % (len(self.builds['pending']), len(self.builds['skipped']))
-    else:
-      self.add_batch(self.args)
-
-    while True:
-      # Clean up finished builds
-      for build in self.builds['running']:
-        if not build.task.ready(): continue
-
-        taskresult = build.task.get() if build.task.successful() else False
-        if taskresult is True:
-          self.stat("Test %u finished" % (build.num,))
-          self.builds['completed'].append(build)
-        else:
-          self.stat("!! Test %u failed :: %s" % (build.num, taskresult))
-          build.note = "Failed: %s" % (taskresult,)
-          self.builds['failed'].append(build)
-        build.finished = time.time()
-        self.builds['running'].remove(build)
-        build.build.cleanup()
-
-      # Check on builder
-      self.check_builder()
-
-      # Read any pending jobs if we're in batchmode
-      while batchmode:
-        rcmd = None
-        try:
-          rcmd = get_queued_job(batchmode)
-        except Exception, e:
-          note = "Invalid batch file"
-          self.stat(note)
-          self.processedbatches.append({ 'args' : "", 'note': note })
-        if rcmd:
-          self.add_batch(rcmd)
-        else:
-          break
-
-      # Prepare pending builds, but not more than processes, as prepared builds
-      # takeup space (hundreds of queued builds would fill /tmp with gigabytes
-      # of things)
-      if len(self.builds['pending']) \
-          and not self.builds['building'] \
-          and len(self.builds['prepared']) < self.args['processes']:
-        build = self.builds['pending'][0]
-        self.builds['building'] = build
-        self.builds['pending'].remove(build)
-        build.num = self.buildindex
-        self.buildindex += 1
-
-      # Start builds if pool is not filled
-      while len(self.builds['prepared']) and len(self.builds['running']) < self.args['processes']:
-        build = self.builds['prepared'][0]
-        self.builds['prepared'].remove(build)
-        build.started = time.time()
-        self.stat("Moving test %u to running" % (build.num,))
-        build.task = self.pool.apply_async(_pool_batchtest_build, [pickle.dumps(build), self.args])
-        self.builds['running'].append(build)
-
-      self.write_status()
-
-      in_progress = len(self.builds['pending']) + len(self.builds['prepared']) + len(self.builds['running'])
-      if not self.builder and not self.builds['building'] and in_progress == 0:
-        # Out of things to do
-        if batchmode and self.buildindex > 0:
-          # In batchmode, reset the pool and restore buildindex to zero.
-          # Buildindex is used for things like VNC display IDs, so we don't want
-          # it to get too high.
-          self.reset_pool()
-          self.buildindex = 0
-        elif not batchmode:
-          self.stat("All tasks complete, exiting")
-          break # Done
-      # Wait a little and repeat loop
-      time.sleep(1)
-      self.tick += 1
-      if self.tick % 120 == 0:
-        # Remove items older than 1 day from these lists
-        self.builds['completed'] = filter(lambda x: (x.finished + 60 * 60 * 24) > time.time(), self.builds['completed'])
-        self.builds['failed'] = filter(lambda x: (x.finished + 60 * 60 * 24 * 3) > time.time(), self.builds['failed'])
-        self.builds['skipped'] = filter(lambda x: (x.finished + 60 * 60 * 24) > time.time(), self.builds['skipped'])
-        self.processedbatches = filter(lambda x: (x['processed'] + 60 * 60 * 24) > time.time(), self.processedbatches)
-      time.sleep(1)
-
-    self.stat("No more tasks, exiting")
-    self.pool.close()
-    self.pool.join()
-    self.pool = None
+    # Add builds to self.builds[target], giving them a uid. Redirect builds from
+    # pending -> skipped if they're already queued
+    def queue_builds(self, builds, target='pending', prepend=False):
+        skip = []
+        ready = []
+        for x in builds:
+            if not x.force and target == 'pending' and self.build_is_queued(x):
+                x.finished = time.time()
+                skip.append(x)
+                x.note = "A build with this revision is already in queue"
+            else:
+                ready.append(x)
+            x.uid = self.processed
+            self.processed += 1
+        if len(skip):
+            self.builds['skipped'].extend(skip)
+        if (prepend):
+            self.builds[target] = ready + self.builds[target]
+        else:
+            self.builds[target].extend(ready)
+        return ready
+
+    #
+    # Run loop
+    #
+    def run(self):
+        if not self.args.get('repo'):
+            raise Exception(
+                '--repo is required for resolving full commit IDs (even on non-compile builds)')
+
+        statfile = self.args.get("status_file")
+
+        if self.args.get('logdir'):
+            self.logfile = open(os.path.join(
+                self.args.get('logdir'), 'tester.log'), 'a')
+
+        self.stat("Starting at %s with args \"%s\"" % (time.ctime(), sys.argv))
+
+        self.reset_pool()
+
+        batchmode = self.args.get('batch')
+        if batchmode:
+            if statfile and os.path.exists(statfile) and self.args.get('status_resume'):
+                sf = open(statfile, 'r')
+                ostat = json.load(sf)
+                sf.close()
+                # Try to recover builds in order they were going to be
+                # processed
+                recover_builds = ostat['running']
+                recover_builds.extend(ostat['prepared'])
+                if ostat['building']:
+                    recover_builds.append(ostat['building'])
+                recover_builds.extend(ostat['pending'])
+
+                if len(recover_builds):
+                    # Create a dummy batch, process it on main thread, move it to completed.
+                    # this all happens before the helper thread starts so there are no other
+                    # batches to contend with
+                    self.add_batch(
+                        "< Tester Restarted : Resuming any interrupted builds >")
+                    resumebatch = self.pendingbatches.pop()
+                    self.processedbatches.append(resumebatch)
+                    resumebatch['processed'] = time.time()
+                    self.write_status()
+                    self.queue_builds(
+                        map(lambda x: BatchBuild.deserialize(x, self.args), recover_builds))
+                    resumebatch['note'] = "Recovered %u builds (%u skipped)" % (
+                        len(self.builds['pending']), len(self.builds['skipped']))
+        else:
+            self.add_batch(self.args)
+
+        while True:
+            # Clean up finished builds
+            for build in self.builds['running']:
+                if not build.task.ready():
+                    continue
+
+                taskresult = build.task.get() if build.task.successful() else False
+                if taskresult is True:
+                    self.stat("Test %u finished" % (build.num,))
+                    self.builds['completed'].append(build)
+                else:
+                    self.stat("!! Test %u failed :: %s" %
+                              (build.num, taskresult))
+                    build.note = "Failed: %s" % (taskresult,)
+                    self.builds['failed'].append(build)
+                build.finished = time.time()
+                self.builds['running'].remove(build)
+                build.build.cleanup()
+
+            # Check on builder
+            self.check_builder()
+
+            # Read any pending jobs if we're in batchmode
+            while batchmode:
+                rcmd = None
+                try:
+                    rcmd = get_queued_job(batchmode)
+                except Exception, e:
+                    note = "Invalid batch file"
+                    self.stat(note)
+                    self.processedbatches.append(
+                        {'args': "", 'note': note})
+                if rcmd:
+                    self.add_batch(rcmd)
+                else:
+                    break
+
+            # Prepare pending builds, but not more than processes, as prepared builds
+            # take up space (hundreds of queued builds would fill /tmp with gigabytes
+            # of things)
+            if len(self.builds['pending']) \
+                    and not self.builds['building'] \
+                    and len(self.builds['prepared']) < self.args['processes']:
+                build = self.builds['pending'][0]
+                self.builds['building'] = build
+                self.builds['pending'].remove(build)
+                build.num = self.buildindex
+                self.buildindex += 1
+
+            # Start builds if pool is not filled
+            while len(self.builds['prepared']) \
+                    and len(self.builds['running']) < self.args['processes']:
+                build = self.builds['prepared'][0]
+                self.builds['prepared'].remove(build)
+                build.started = time.time()
+                self.stat("Moving test %u to running" % (build.num,))
+                build.task = self.pool.apply_async(
+                    _pool_batchtest_build, [pickle.dumps(build), self.args])
+                self.builds['running'].append(build)
+
+            self.write_status()
+
+            in_progress = (len(self.builds['pending'])
+                           + len(self.builds['prepared'])
+                           + len(self.builds['running']))
+
+            if not self.builder and not self.builds['building'] and in_progress == 0:
+                # Out of things to do
+                if batchmode and self.buildindex > 0:
+                    # In batchmode, reset the pool and restore buildindex to zero.
+                    # Buildindex is used for things like VNC display IDs, so we don't want
+                    # it to get too high.
+                    self.reset_pool()
+                    self.buildindex = 0
+                elif not batchmode:
+                    self.stat("All tasks complete, exiting")
+                    break  # Done
+            # Wait a little and repeat loop
+            time.sleep(1)
+            self.tick += 1
+            if self.tick % 120 == 0:
+                # Remove items older than 1 day from these lists
+                self.builds['completed'] = filter(lambda x: (
+                    x.finished + 60 * 60 * 24) > time.time(), self.builds['completed'])
+                self.builds['failed'] = filter(lambda x: (
+                    x.finished + 60 * 60 * 24 * 3) > time.time(), self.builds['failed'])
+                self.builds['skipped'] = filter(lambda x: (
+                    x.finished + 60 * 60 * 24) > time.time(), self.builds['skipped'])
+                self.processedbatches = filter(lambda x: (
+                    x['processed'] + 60 * 60 * 24) > time.time(), self.processedbatches)
+            time.sleep(1)
+
+        self.stat("No more tasks, exiting")
+        self.pool.close()
+        self.pool.join()
+        self.pool = None
-  # Threaded call the builder is started on. Calls _process_batch_inner and
-  # handles return results
-  @staticmethod
-  def _process_batch(globalargs, batchargs, returnproxy, hook):
-    try:
-      if hook:
-        mod = _get_hook(globalargs.get('hook'))
-      else:
-        mod = None
-      ret = BatchTest._process_batch_inner(globalargs, batchargs, mod)
-    except Exception, e:
-      import traceback
-      traceback.print_exc()
-      ret = "An exception occured while processing batch -- %s: %s" % (type(e), e)
-
-    if type(ret) == str:
-      returnproxy['result'] = 'error'
-    else:
-      returnproxy['result'] = 'success'
-    returnproxy['ret'] = ret
-
-  #
-  # Inner call for _process_batch
-  @staticmethod
-  def _process_batch_inner(globalargs, batchargs, hook):
-    if not batchargs['firstbuild']:
-      raise Exception("--firstbuild is required")
-
-    mode = batchargs['mode']
-    dorange = 'lastbuild' in batchargs and batchargs['lastbuild']
-    builds = []
-    # Queue builds
-    if mode == 'nightly':
-      startdate = parse_nightly_time(batchargs['firstbuild'])
-      if dorange:
-        enddate = parse_nightly_time(batchargs['lastbuild'])
-        dates = range(startdate.toordinal(), enddate.toordinal() + 1)
-      else:
-        dates = [ startdate.toordinal() ]
-      for x in dates:
-        builds.append(BuildGetter.NightlyBuild(datetime.date.fromordinal(x)))
-    elif mode == 'tinderbox':
-      startdate = float(batchargs['firstbuild'])
-      if dorange:
-        enddate = float(batchargs['lastbuild'])
-        tinderbuilds = BuildGetter.list_tinderbox_builds(startdate, enddate)
-        for x in tinderbuilds:
-          builds.append(BuildGetter.TinderboxBuild(x))
-      else:
-        builds.append(BuildGetter.TinderboxBuild(startdate))
-    elif mode == 'ftp':
-      path = batchargs['firstbuild']
-      builds.append(BuildGetter.FTPBuild(path))
-    elif mode == 'try':
-      path = batchargs['firstbuild']
-      builds.append(BuildGetter.TryBuild(path))
-    elif mode == 'compile':
-      # See https://github.com/mozilla/areweslimyet/issues/47
-      raise Exception("Build type 'compile' is not currently supported")
-    else:
-      raise Exception("Unknown mode %s" % mode)
-
-    readybuilds = []
-    skippedbuilds = []
-    force = batchargs.get('force') if batchargs.get('force') else globalargs.get('force')
-    for build in builds:
-      rev = build.get_revision()
-      #HACKITY HACK HACK HACK
-      build._scraper = None
-
-      build = BatchBuild(build, rev)
-      build.force = force
-      build.series = batchargs.get('series')
-      if not build.build.get_valid():
-        # Can happen with FTP builds we failed to lookup on ftp.m.o, or any
-        # builds that arn't found in pushlog
-        build.note = "Build is not found or missing from pushlog"
-      elif hook and not hook.should_test(build, globalargs):
-        if not build.note:
-          build.note = "Build skipped by tester";
-      else:
-        readybuilds.append(build)
-        continue
-
-      build.finished = time.time()
-      skippedbuilds.append(build)
-
-    return [ readybuilds, skippedbuilds ]
-
-  #
-  # Build testing pool
-  #
-  @staticmethod
-  def test_build(build, globalargs):
-    mod = None
-    ret = True
-    if not globalargs.get('hook'):
-      return "Cannot test builds without a --hook providing run_tests(Build)"
-
-    try:
-      mod = _get_hook(globalargs.get('hook'))
-      # TODO BenchTester should actually dynamically pick a free port, rather than
-      # taking it as a parameter.
-      s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-      try:
-        s.bind(('', 24242 + build.num))
-      except Exception, e:
-        raise Exception("Test error: jsbridge port %u unavailable" % (24242 + build.num,))
-      s.close()
-
-      mod.run_tests(build, globalargs)
-    except (Exception, KeyboardInterrupt) as e:
-      err = "%s :: %s" % (type(e), e)
-      ret = err
-    return ret
+    # Threaded call the builder is started on. Calls _process_batch_inner and
+    # handles return results
+    @staticmethod
+    def _process_batch(globalargs, batchargs, returnproxy, hook):
+        try:
+            if hook:
+                mod = _get_hook(globalargs.get('hook'))
+            else:
+                mod = None
+            ret = BatchTest._process_batch_inner(globalargs, batchargs, mod)
+        except Exception, e:
+            import traceback
+            traceback.print_exc()
+            ret = "An exception occurred while processing batch -- %s: %s" % (
+                type(e), e)
+
+        if type(ret) == str:
+            returnproxy['result'] = 'error'
+        else:
+            returnproxy['result'] = 'success'
+        returnproxy['ret'] = ret
+
+    #
+    # Inner call for _process_batch
+    @staticmethod
+    def _process_batch_inner(globalargs, batchargs, hook):
+        if not batchargs['firstbuild']:
+            raise Exception("--firstbuild is required")
+
+        mode = batchargs['mode']
+        dorange = 'lastbuild' in batchargs and batchargs['lastbuild']
+        builds = []
+        # Queue builds
+        if mode == 'nightly':
+            startdate = parse_nightly_time(batchargs['firstbuild'])
+            if dorange:
+                enddate = parse_nightly_time(batchargs['lastbuild'])
+                dates = range(startdate.toordinal(), enddate.toordinal() + 1)
+            else:
+                dates = [startdate.toordinal()]
+            for x in dates:
+                builds.append(BuildGetter.NightlyBuild(
+                    datetime.date.fromordinal(x)))
+        elif mode == 'tinderbox':
+            startdate = float(batchargs['firstbuild'])
+            if dorange:
+                enddate = float(batchargs['lastbuild'])
+                tinderbuilds = BuildGetter.list_tinderbox_builds(
+                    startdate, enddate)
+                for x in tinderbuilds:
+                    builds.append(BuildGetter.TinderboxBuild(x))
+            else:
+                builds.append(BuildGetter.TinderboxBuild(startdate))
+        elif mode == 'ftp':
+            path = batchargs['firstbuild']
+            builds.append(BuildGetter.FTPBuild(path))
+        elif mode == 'try':
+            path = batchargs['firstbuild']
+            builds.append(BuildGetter.TryBuild(path))
+        elif mode == 'compile':
+            # See https://github.com/mozilla/areweslimyet/issues/47
+            raise Exception("Build type 'compile' is not currently supported")
+        else:
+            raise Exception("Unknown mode %s" % mode)
+
+        readybuilds = []
+        skippedbuilds = []
+        force = batchargs.get('force') if batchargs.get(
+            'force') else globalargs.get('force')
+        for build in builds:
+            rev = build.get_revision()
+            # HACKITY HACK HACK HACK
+            build._scraper = None
+
+            build = BatchBuild(build, rev)
+            build.force = force
+            build.series = batchargs.get('series')
+            if not build.build.get_valid():
+                # Can happen with FTP builds we failed to look up on ftp.m.o, or any
+                # builds that aren't found in pushlog
+                build.note = "Build is not found or missing from pushlog"
+            elif hook and not hook.should_test(build, globalargs):
+                if not build.note:
+                    build.note = "Build skipped by tester"
+            else:
+                readybuilds.append(build)
+                continue
+
+            build.finished = time.time()
+            skippedbuilds.append(build)
+
+        return [readybuilds, skippedbuilds]
+
+    #
+    # Build testing pool
+    #
+    @staticmethod
+    def test_build(build, globalargs):
+        mod = None
+        ret = True
+        if not globalargs.get('hook'):
+            return "Cannot test builds without a --hook providing run_tests(Build)"
+
+        try:
+            mod = _get_hook(globalargs.get('hook'))
+            # TODO BenchTester should actually dynamically pick a free port, rather than
+            # taking it as a parameter.
+            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            try:
+                s.bind(('', 24242 + build.num))
+            except Exception, e:
+                raise Exception(
+                    "Test error: jsbridge port %u unavailable" % (24242 + build.num,))
+            s.close()
+
+            mod.run_tests(build, globalargs)
+        except (Exception, KeyboardInterrupt) as e:
+            err = "%s :: %s" % (type(e), e)
+            ret = err
+        return ret
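The TODO above (dynamically picking a free port instead of probing 24242 + build.num) could be addressed by letting the OS hand out an ephemeral port. A sketch, with the caveat that the port could in principle be grabbed by another process between close() and the eventual bind:

def pick_free_port():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('', 0))              # port 0 -> the OS picks a free ephemeral port
    port = s.getsockname()[1]
    s.close()
    return port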

 class BatchTestCLI(BatchTest):
-  def __init__(self, args=sys.argv[1:]):
-    self.parser = argparse.ArgumentParser(description='Run tests against one or more builds in parallel')
-    self.parser.add_argument('--mode', help='nightly or tinderbox or compile')
-    self.parser.add_argument('--batch', help='Batch mode -- given a folder name, treat each file within as containing a set of arguments to this script, deleting each file as it is processed.')
-    self.parser.add_argument('--firstbuild', help='For nightly, the date (YYYY-MM-DD) of the first build to test. For tinderbox, the timestamp to start testing builds at. For build, the first revision to build.')
-    self.parser.add_argument('--lastbuild', help='[optional] For nightly builds, the last date to test. For tinderbox, the timestamp to stop testing builds at. For build, the last revision to build If omitted, first_build is the only build tested.')
-    self.parser.add_argument('-p', '--processes', help='Number of tests to run in parallel.', default=multiprocessing.cpu_count(), type=int)
-    self.parser.add_argument('--hook', help='Name of a python file to import for each test. The test will call should_test(BatchBuild), run_tests(BatchBuild), and cli_hook(argparser) in this file.')
-    self.parser.add_argument('--logdir', '-l', help="Directory to log progress to. Doesn't make sense for batched processes. Creates 'tester.log', 'buildname.test.log' and 'buildname.build.log' (for compile builds).")
-    self.parser.add_argument('--repo', help="For build mode, the checked out FF repo to use")
-    self.parser.add_argument('--mozconfig', help="For build mode, the mozconfig to use")
-    self.parser.add_argument('--objdir', help="For build mode, the objdir provided mozconfig will create")
-    self.parser.add_argument('--no-pull', action='store_true', help="For build mode, don't run a hg pull in the repo before messing with a commit")
-    self.parser.add_argument('--status-file', help="A file to keep a json-dump of the currently running job status in. This file is mv'd into place to avoid read/write issues")
-    self.parser.add_argument('--status-resume', action='store_true', help="Resume any jobs still present in the status file. Useful for interrupted sessions")
-    self.parser.add_argument('--prioritize', action='store_true', help="For batch'd builds, insert at the beginning of the pending queue rather than the end")
-    self.parser.add_argument('--force', action='store_true', help="Test/queue given builds even if they have already been tested or are already in queue")
-    temp = vars(self.parser.parse_known_args(args)[0])
-    if temp.get('hook'):
-      mod = _get_hook(temp.get('hook'))
-      mod.cli_hook(self.parser)
-
-    args = vars(self.parser.parse_args(args))
-    super(BatchTestCLI, self).__init__(args)
+
+    def __init__(self, args=sys.argv[1:]):
+        self.parser = argparse.ArgumentParser(
+            description='Run tests against one or more builds in parallel')
+        self.parser.add_argument('--mode',
+                                 help='nightly or tinderbox or compile')
+        self.parser.add_argument('--batch',
+                                 help='Batch mode -- given a folder name, treat each file within '
+                                 'as containing a set of arguments to this script, deleting '
+                                 'each file as it is processed.')
+        self.parser.add_argument('--firstbuild',
+                                 help='For nightly, the date (YYYY-MM-DD) of the first build to '
+                                 'test. For tinderbox, the timestamp to start testing builds '
+                                 'at. For build, the first revision to build.')
+        self.parser.add_argument('--lastbuild',
+                                 help='[optional] For nightly builds, the last date to test. For '
+                                 'tinderbox, the timestamp to stop testing builds at. For '
+                                 'build, the last revision to build. If omitted, first_build '
+                                 'is the only build tested.')
+        self.parser.add_argument('-p', '--processes',
+                                 help='Number of tests to run in parallel.',
+                                 default=multiprocessing.cpu_count(), type=int)
+        self.parser.add_argument('--hook',
+                                 help='Name of a python file to import for each test. The test '
+                                 'will call should_test(BatchBuild), run_tests(BatchBuild), '
+                                 'and cli_hook(argparser) in this file.')
+        self.parser.add_argument('--logdir', '-l',
+                                 help="Directory to log progress to. Doesn't make sense for "
+                                 "batched processes. Creates 'tester.log', "
+                                 "'buildname.test.log' and 'buildname.build.log' (for "
+                                 "compile builds).")
+        self.parser.add_argument('--repo',
+                                 help="For build mode, the checked out FF repo to use")
+        self.parser.add_argument('--mozconfig',
+                                 help="For build mode, the mozconfig to use")
+        self.parser.add_argument('--objdir',
+                                 help="For build mode, the objdir provided mozconfig will create")
+        self.parser.add_argument('--no-pull', action='store_true',
+                                 help="For build mode, don't run a hg pull in the repo before "
+                                 "messing with a commit")
+        self.parser.add_argument('--status-file',
+                                 help="A file to keep a json-dump of the currently running job "
+                                 "status in. This file is mv'd into place to avoid "
+                                 "read/write issues")
+        self.parser.add_argument('--status-resume', action='store_true',
+                                 help="Resume any jobs still present in the status file. Useful "
+                                 "for interrupted sessions")
+        self.parser.add_argument('--prioritize', action='store_true',
+                                 help="For batch'd builds, insert at the beginning of the pending "
+                                 "queue rather than the end")
+        self.parser.add_argument('--force', action='store_true',
+                                 help="Test/queue given builds even if they have already been "
+                                 "tested or are already in queue")
+        temp = vars(self.parser.parse_known_args(args)[0])
+        if temp.get('hook'):
+            mod = _get_hook(temp.get('hook'))
+            mod.cli_hook(self.parser)
+
+        args = vars(self.parser.parse_args(args))
+        super(BatchTestCLI, self).__init__(args)

 #
 # Main
 #

 if __name__ == '__main__':
-  cli = BatchTestCLI()
-  cli.run()
+    cli = BatchTestCLI()
+    cli.run()
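Batch mode consumes one JSON file per job from the watched folder (see get_queued_job above), so queuing work is just dropping a file whose keys mirror the CLI options. A sketch; the file-naming scheme is an assumption, only the key names come from this script:

import json
import os
import time

def queue_batch_job(batch_dir, jobargs):
    # get_queued_job() pops the alphanumerically-first file, so a timestamp
    # prefix gives rough FIFO ordering
    path = os.path.join(batch_dir, '%d.json' % time.time())
    with open(path, 'w') as f:
        json.dump(jobargs, f)

queue_batch_job('batch', {'mode': 'nightly', 'firstbuild': '2013-01-01',
                          'lastbuild': '2013-01-07', 'prioritize': False})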
diff --git a/benchtester/BenchTester.py b/benchtester/BenchTester.py
index d21e9d7..c433d46 100644
--- a/benchtester/BenchTester.py
+++ b/benchtester/BenchTester.py
@@ -8,11 +8,13 @@
 # You can obtain one at http://mozilla.org/MPL/2.0/.

 import argparse
-import mercurial, mercurial.ui, mercurial.hg, mercurial.commands
+import mercurial
+import mercurial.ui
+import mercurial.hg
+import mercurial.commands
 import os
 import re
 import sqlite3
-import subprocess
 import sys
 import time

@@ -22,47 +24,48 @@
 gVersion = 1

 gTableSchemas = [
-  # benchtester_version - the database version, can be used for upgrade scripts
-  '''CREATE TABLE IF NOT EXISTS
+    # benchtester_version - the database version, can be used for upgrade
+    # scripts
+    '''CREATE TABLE IF NOT EXISTS
       "benchtester_version" ("version" INTEGER NOT NULL UNIQUE)''',

-  # Builds - info on builds we have tests for
-  '''CREATE TABLE IF NOT EXISTS
+    # Builds - info on builds we have tests for
+    '''CREATE TABLE IF NOT EXISTS
       "benchtester_builds" ("id" INTEGER PRIMARY KEY NOT NULL,
                             "name" VARCHAR NOT NULL UNIQUE,
                             "time" DATETIME NOT NULL,
                             "repo_id" INTEGER NOT NULL)''',

-  # Tests - tests that have been run and against which build
-  '''CREATE TABLE IF NOT EXISTS
+    # Tests - tests that have been run and against which build
+    '''CREATE TABLE IF NOT EXISTS
       "benchtester_tests" ("id" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                            "name" VARCHAR NOT NULL,
                            "time" DATETIME NOT NULL,
                            "build_id" INTEGER NOT NULL,
                            "successful" INTEGER NOT NULL)''',

-  # Datapoints - names of datapoints
-  '''CREATE TABLE IF NOT EXISTS
+    # Datapoints - names of datapoints
+    '''CREATE TABLE IF NOT EXISTS
       "benchtester_datapoints" ("id" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                                 "name" VARCHAR NOT NULL UNIQUE)''',

-  # Procs - names of processes
-  '''CREATE TABLE IF NOT EXISTS
+    # Procs - names of processes
+    '''CREATE TABLE IF NOT EXISTS
       "benchtester_procs" ("id" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                            "name" VARCHAR NOT NULL UNIQUE)''',

-  # Repos - names of source repositories
-  '''CREATE TABLE IF NOT EXISTS
+    # Repos - names of source repositories
+    '''CREATE TABLE IF NOT EXISTS
       "benchtester_repos" ("id" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                            "name" VARCHAR NOT NULL UNIQUE)''',

-  # Checkpoints - names of checkpoints
-  '''CREATE TABLE IF NOT EXISTS
+    # Checkpoints - names of checkpoints
+    '''CREATE TABLE IF NOT EXISTS
       "benchtester_checkpoints" ("id" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                                  "name" VARCHAR NOT NULL UNIQUE)''',

-  # Data - datapoints from tests
-  '''CREATE TABLE IF NOT EXISTS
+    # Data - datapoints from tests
+    '''CREATE TABLE IF NOT EXISTS
       "benchtester_data" ("test_id" INTEGER NOT NULL,
                           "datapoint_id" INTEGER NOT NULL,
                           "checkpoint_id" INTEGER NOT NULL,
@@ -72,10 +75,12 @@
                           "units" INTEGER NOT NULL,
                           "kind" INTEGER NOT NULL)''',

-  # Some default indexes
-  '''CREATE INDEX IF NOT EXISTS test_lookup ON benchtester_tests ( name, build_id DESC )''',
-  '''CREATE INDEX IF NOT EXISTS data_for_test ON benchtester_data ( test_id DESC, datapoint_id )'''
-];
+    # Some default indexes
+    '''CREATE INDEX IF NOT EXISTS
+      test_lookup ON benchtester_tests ( name, build_id DESC )''',
+    '''CREATE INDEX IF NOT EXISTS
+      data_for_test ON benchtester_data ( test_id DESC, datapoint_id )'''
+]

 # TODO:
 # - doxygen or at least some sort of documentation
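The lookup tables above normalize names out of benchtester_data, so reads join back through the *_id columns; an illustrative query (the database path and test id are invented):

import sqlite3

conn = sqlite3.connect('benchtester.sqlite')
cur = conn.cursor()
cur.execute('''SELECT dp.name
                 FROM benchtester_data d
                 JOIN benchtester_datapoints dp ON dp.id = d.datapoint_id
                WHERE d.test_id = ?''', (1,))
names = [row[0] for row in cur.fetchall()]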
- # Given: [ "Main", "Web Content (123)", "Web Content (345)", "Web Content (678)" ] - # Mapping: [ "Main" => "Main", - # "Web Content (123)" => "Web Content", - # "Web Content (345)" => "Web Content 2", - # "Web Content (678)" => "Web Content 3" - # ] - proc_name_counts = {} - proc_name_mapping = {} - - for full_process_name in process_names: - # Drop the pid portion of process name - process_re = r'(.*)\s+\(.+\)' - m = re.match(process_re, full_process_name) - if m: - proc_name = m.group(1) - if proc_name in proc_name_counts: - proc_name_counts[proc_name] += 1 - proc_name_mapping[full_process_name] = "%s %d" % (proc_name, proc_name_counts[proc_name]) - else: - # Leave the first entry w/o a number - proc_name_counts[proc_name] = 1 - proc_name_mapping[full_process_name] = proc_name - else: - proc_name_mapping[full_process_name] = full_process_name - - return proc_name_mapping - - def insert_results(self, test_id, results): - # - results is an array of iterations - # - iterations is an array of checkpoints - # - checkpoint is a dict with: label, reports - # - reports is a dict of processes - cur = self.sqlite.cursor() - - for x, iteration in enumerate(results): - iternum = x + 1 - for checkpoint in iteration: - label = checkpoint['label'] - - # insert checkpoint name, get checkpoint_id - cur.execute("SELECT id FROM benchtester_checkpoints WHERE name = ?", (label, )) - row = cur.fetchone() - checkpoint_id = row[0] if row else None - if checkpoint_id is None: - cur.execute("INSERT INTO benchtester_checkpoints(name) VALUES (?)", (label, )) - checkpoint_id = cur.lastrowid - - proc_name_mapping = self.map_process_names(checkpoint['reports']) - for process_name, reports in checkpoint['reports'].iteritems(): - # reports is a dictionary of datapoint_name: { val, unit, kind } - process_name = proc_name_mapping[process_name] - - # insert process name, get process_id - cur.execute("SELECT id FROM benchtester_procs WHERE name = ?", (process_name, )) - row = cur.fetchone() - process_id = row[0] if row else None - if process_id is None: - cur.execute("INSERT INTO benchtester_procs(name) VALUES (?)", (process_name, )) - process_id = cur.lastrowid - - # insert datapoint names - insertbegin = time.time() - self.info("Inserting %u datapoints into DB" % len(reports)) - cur.executemany("INSERT OR IGNORE INTO `benchtester_datapoints`(name) " - "VALUES (?)", - ( [ k ] for k in reports.iterkeys() )) - self.sqlite.commit() - self.info("Filled datapoint names in %.02fs" % (time.time() - insertbegin)) - - # insert datapoint values - insertbegin = time.time() - cur.executemany("INSERT INTO `benchtester_data` " - "SELECT ?, p.id, ?, ?, ?, ?, ?, ? FROM `benchtester_datapoints` p " - "WHERE p.name = ?", - ( [ test_id, - checkpoint_id, - process_id, - iternum, - dp['val'], - dp['unit'], - dp['kind'], - name ] - for name, dp in reports.iteritems() if dp )) - self.sqlite.commit() - self.info("Filled datapoint values in %.02fs" % (time.time() - insertbegin)) - - # datapoints a list of the format [ [ "key", value, "meta"], ... ]. - # Duplicate keys are allowed. 
Value is numeric and required, meta is an - # optional string (see db format) - def add_test_results(self, testname, datapoints, succeeded=True): - # Ensure DB is open - if not self._open_db(): - return self.error("Failed to open sqlite database") - - if not testname: - return self.error("Invalid use of addDataPoint()") - - timestamp = time.time() - - #for datapoint, val in datapoints.iteritems(): - # self.info("Datapoint: Test '%s', Datapoint '%s', Value '%s'" % (testname, datapoint, val)) - if self.sqlite: - try: - cur = self.sqlite.cursor() - cur.execute("INSERT INTO " - " benchtester_tests(name, time, build_id, successful) " - "VALUES (?, ?, ?, ?)", - (testname, int(timestamp), self.build_id, succeeded)) - cur.execute("SELECT last_insert_rowid()") - - if datapoints: - testid = cur.fetchone()[0] - self.insert_results(testid, datapoints) - except Exception, e: - self.error("Failed to insert data into sqlite, got '%s': %s" % (type(e), e)) - import traceback - traceback.print_exc() - self.sqlite.rollback() - return False - return True - - def __init__(self, logfile=None, out=sys.stdout): - self.starttime = time.clock() - self.ready = False - self.args = {} - self.argparser = argparse.ArgumentParser(description='Run automated benchmark suite, optionally adding datapoints to a sqlite database') - self.arglist = {} - self.out = out - self.modules = {} - self.logfile = None - self.buildtime = None - self.buildname = None - self.sqlite = False - self.errors = [] - self.warnings = [] - - # Default to outputing 'mach' style to stdout. - log_args = { 'log_mach': ['-'] } - if logfile: - # If a logfile is requested we also output in a raw structured log - # format to the requested file. - log_args.update({ 'log_raw': [ logfile ] }) - - self.logger = commandline.setup_logging("AwsyTest", log_args) - - # These can be passed to setup() like so: - # mytester.setup({'binary': 'blah', 'buildname': 'blee'}) - # OR you can call mytester.parseArgs() on a command-line formatted arg list (sys.argv) to extract - # them as needed. - self.add_argument('-b', '--binary', help='The binary (either in the current PATH or a full path) to test') - self.add_argument('--buildname', help='The name of this firefox build. If omitted, attempts to use the \ - commit id from the mercurial respository the binary resides \ - in') - self.add_argument('--buildtime', help='The unix timestamp to assign to this build \ - build. If omitted, attempts to use the commit timestamp \ - from the mercurial repository the binary resides in') - self.add_argument('--test-module', '-m', help='Load the specified test module (from libs). You must load at least one module to have tests', - action='append') - self.add_argument('-l', '--logfile', help='Log to given file') - self.add_argument('-s', '--sqlitedb', help='Merge datapoint into specified sqlite database') - - self.info("BenchTester instantiated") - - def add_argument(self, *args, **kwargs): - act = self.argparser.add_argument(*args, **kwargs) - if kwargs.has_key('default'): - self.args[act.dest] = kwargs['default'] - - # Parses commandline arguments, *AND* loads the modules specified on them, - # such that their arguments can be known/parsed. Does not prevent loading of - # more modules later on. - # - returns a args dict suitable for passing to setup(). - # - If handle_exceptions is false, will let argparser failures fall through. - # Otherwise, prints an error. 
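For reference, the nested results structure that insert_results() walks (and that add_test_results() receives as its datapoints argument) has this shape; every value below is invented for illustration:

results = [            # one list per iteration
    [                  # one dict per checkpoint
        {'label': 'TabsOpen',
         'reports': {  # process name -> datapoint name -> measurement
             'Main': {'resident': {'val': 200000000, 'unit': 0, 'kind': 0}},
             'Web Content (123)': {'resident': {'val': 90000000, 'unit': 0, 'kind': 0}},
         }},
    ],
]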
- # - The "test_module" argument is returned, but not used by setup, and is - # useful for seeing what modules the commandline just caused to load - def parse_args(self, rawargs=sys.argv[1:]): - self.info("Parsing arguments...") - try: - args = vars(self.argparser.parse_known_args(rawargs)[0]) - # Modules can add more arguments, so load the ones specified - # and re-parse - if args['test_module']: - for m in args['test_module']: - self.load_module(m) - args = vars(self.argparser.parse_args(rawargs)) - return args - except SystemExit, e: + def info(self, msg): + self.logger.info(msg) + + def error(self, msg): + self.errors.append(msg) + self.logger.error(msg) return False - def __del__(self): - # In case we exception out mid transaction or something - if (hasattr(self, 'sqlite') and self.sqlite): - self.sqlite.rollback() - - def _open_db(self): - if not self.args['sqlitedb'] or self.sqlite: return True - - self.info("Setting up SQLite") - if not self.buildname or not self.buildtime: - self.error("Cannot use db without a buildname and buildtime set") - self.sqlitedb = self.args['sqlitedb'] = None - return False - try: - db_exists = os.path.exists(self.args['sqlitedb']) - - sql_path = os.path.abspath(self.args['sqlitedb']) - self.sqlite = sqlite3.connect(sql_path, timeout=900) - cur = self.sqlite.cursor() - - if db_exists: - # make sure the version matches - cur.execute("SELECT `version` FROM `benchtester_version` WHERE `version` = ?", [ gVersion ]) - row = cur.fetchone() - version = row[0] if row else None - if version != gVersion: - self.error("Incompatible versions: %s is version %s, current version is %s" % (self.args['sqlitedb'], version, gVersion)) - self.sqlitedb = self.args['sqlitedb'] = None - return False - - for schema in gTableSchemas: - cur.execute(schema) - - if not db_exists: - cur.execute("INSERT INTO `benchtester_version` (`version`) VALUES (?)", [ gVersion ]) - - # Create/update the repo - cur.execute("SELECT `id` FROM `benchtester_repos` WHERE `name` = ?", [ self.repo ]) - row = cur.fetchone() - if row: - repo_id = int(row[0]) - else: - cur.execute("INSERT INTO benchtester_repos(name) VALUES (?)", (self.repo, )) - repo_id = cur.lastrowid - - # Create/update build ID - cur.execute("SELECT `time`, `id` FROM `benchtester_builds` WHERE `name` = ?", [ self.buildname ]) - buildrow = cur.fetchone() - - if buildrow and buildrow[0] != int(self.buildtime): - self.warn("Build '%s' already exists in the database, but with a differing timestamp. Overwriting old record (%s -> %s)" % (self.buildname, buildrow[0], self.buildtime)) - cur.execute("UPDATE `benchtester_builds` SET `time` = ? 
WHERE `id` = ?", [ int(self.buildtime), buildrow[1] ]) - self.build_id = buildrow[1] - elif not buildrow: - self.info("Creating new build record") - cur.execute("INSERT INTO `benchtester_builds` (`name`, `time`, `repo_id`) " - "VALUES (?, ?, ?)", - (self.buildname, int(self.buildtime), repo_id)) - cur.execute("SELECT last_insert_rowid()") - self.build_id = cur.fetchone()[0] - else: - self.build_id = buildrow[1] - self.info("Found build record") - self.sqlite.commit() - except Exception, e: - self.error("Failed to setup sqliteDB '%s': %s - %s\n" % (self.args['sqlitedb'], type(e), e)) - self.sqlitedb = self.args['sqlitedb'] = None - return False - - return True - - def setup(self, args): - self.info("Performing setup") - self.hg_ui = mercurial.ui.ui() - - # args will already contain defaults from add_argument calls - self.args.update(args) - - # Check that binary is set - if not self.args['binary']: - return self.error("--binary is required, see --help") - try: - self.binary = os.path.abspath(self.args['binary']) - except: - self.binary = False - if not self.binary or not os.path.exists(self.binary): - return self.error("Unable to access binary '%s' (abs: '%s')\n" % (self.args['binary'], self.binary if self.binary else "Cannot resolve")) - - # Set commit name/timestamp - if (self.args['buildname']): - self.buildname = self.args['buildname'].strip() - if (self.args['buildtime']): - self.buildtime = str(self.args['buildtime']).strip() - - if 'repo' in self.args and self.args['repo']: - self.repo = self.args['repo'] - self.info('Using provided repo: %s' % self.repo) - else: - self.repo = 'mozilla-inbound' - self.info('Using default repo: mozilla-inbound') - - # Try to autodetect commitname/time if given a binary in a repo - if not self.buildname or not self.buildtime: - try: - hg_repo = mercurial.hg.repository(self.hg_ui, os.path.dirname(self.binary)) - except: - hg_repo = None - if hg_repo: - try: - self.info("Binary is in a hg repo, attempting to detect build info") - self.hg_ui.pushbuffer() - mercurial.commands.tip(self.hg_ui, hg_repo, template="{node} {date}") - tipinfo = self.hg_ui.popbuffer().split() - hg_changeset = tipinfo[0] - # Date is a float (truncate to int) of format 12345.0[+/-]3600 where 3600 is timezone info - hg_date = tipinfo[1].split('.')[0] - if not self.buildname: - self.buildname = hg_changeset - self.info("No build name given, using %s from repo binary is in" % self.buildname) - if not self.buildtime: - self.buildtime = hg_date - self.info("No build time given, using %s from repo binary is in" % self.buildtime) - except Exception as e: - self.error("Found a Hg repo, but failed to get changeset/timestamp. 
\ - You may need to provide these manually with --buildname, --buildtime\ - \nError was: %s" % (e)); + def warn(self, msg): + self.warnings.append(msg) + self.logger.warning(msg) - # Sanity checks - if (self.sqlite): - if (not self.buildname or not len(self.buildname)): - self.error("Must provide a name for this build via --buildname in order to log to sqlite") - return False + def run_test(self, testname, testtype, testvars={}): + if not self.ready: + return self.error("run_test() called before setup") - try: - inttime = int(self.buildtime, 10) - except: - inttime = None - if (not inttime or str(inttime) != self.buildtime or inttime < 1): - self.error("--buildtime must be set to a unix timestamp in order to log to sqlite") - return False + # make sure a record is created, even if no testdata is produced + if not self._open_db(): + return self.error("Failed to open sqlite database") - self.failed_modules = {} - for m in self.modules: - if not self.modules[m].setup(): - self.error("Failed to setup module %s!" % m) - self.failed_modules[m] = self.modules[m] - for m in self.failed_modules: - del self.modules[m] - - self.ready = True - self.info("Setup successful") - return True + if testtype in self.modules: + self.info("Passing test '%s' to module '%s'" % + (testname, testtype)) + return self.modules[testtype].run_test(testname, testvars) + else: + return self.error("Test '%s' is of unknown type '%s'" % (testname, testtype)) + + # Modules are named 'SomeModule.py' and have a class named 'SomeModule' + # based on BenchTest + def load_module(self, modname): + if self.ready: + return self.error("Modules must be loaded before setup()") + + if modname in self.modules: + return True + + self.info("Loading module '%s'" % (modname)) + try: + module = __import__(modname) + self.modules[modname] = vars(module)[modname](self) + except Exception, e: + return self.error("Failed to load module '%s', Exception '%s': %s" % + (modname, type(e), e)) + + return True + + @staticmethod + def map_process_names(process_names): + # Normalize the process names. 
+ # Given: [ "Main", "Web Content (123)", "Web Content (345)", "Web Content (678)" ] + # Mapping: [ "Main" => "Main", + # "Web Content (123)" => "Web Content", + # "Web Content (345)" => "Web Content 2", + # "Web Content (678)" => "Web Content 3" + # ] + proc_name_counts = {} + proc_name_mapping = {} + + for full_process_name in process_names: + # Drop the pid portion of process name + process_re = r'(.*)\s+\(.+\)' + m = re.match(process_re, full_process_name) + if m: + proc_name = m.group(1) + if proc_name in proc_name_counts: + proc_name_counts[proc_name] += 1 + proc_name_mapping[full_process_name] = "%s %d" % ( + proc_name, proc_name_counts[proc_name]) + else: + # Leave the first entry w/o a number + proc_name_counts[proc_name] = 1 + proc_name_mapping[full_process_name] = proc_name + else: + proc_name_mapping[full_process_name] = full_process_name + + return proc_name_mapping + + def insert_results(self, test_id, results): + # - results is an array of iterations + # - iterations is an array of checkpoints + # - checkpoint is a dict with: label, reports + # - reports is a dict of processes + cur = self.sqlite.cursor() + + for x, iteration in enumerate(results): + iternum = x + 1 + for checkpoint in iteration: + label = checkpoint['label'] + + # insert checkpoint name, get checkpoint_id + cur.execute( + "SELECT id FROM benchtester_checkpoints WHERE name = ?", (label, )) + row = cur.fetchone() + checkpoint_id = row[0] if row else None + if checkpoint_id is None: + cur.execute( + "INSERT INTO benchtester_checkpoints(name) VALUES (?)", (label, )) + checkpoint_id = cur.lastrowid + + proc_name_mapping = self.map_process_names( + checkpoint['reports']) + for process_name, reports in checkpoint['reports'].iteritems(): + # reports is a dictionary of datapoint_name: { val, unit, + # kind } + process_name = proc_name_mapping[process_name] + + # insert process name, get process_id + cur.execute( + "SELECT id FROM benchtester_procs WHERE name = ?", (process_name, )) + row = cur.fetchone() + process_id = row[0] if row else None + if process_id is None: + cur.execute( + "INSERT INTO benchtester_procs(name) VALUES (?)", (process_name, )) + process_id = cur.lastrowid + + # insert datapoint names + insertbegin = time.time() + self.info("Inserting %u datapoints into DB" % len(reports)) + cur.executemany("INSERT OR IGNORE INTO `benchtester_datapoints`(name) " + "VALUES (?)", + ([k] for k in reports.iterkeys())) + self.sqlite.commit() + self.info("Filled datapoint names in %.02fs" % + (time.time() - insertbegin)) + + # insert datapoint values + insertbegin = time.time() + cur.executemany("INSERT INTO `benchtester_data` " + "SELECT ?, p.id, ?, ?, ?, ?, ?, ? " + "FROM `benchtester_datapoints` p " + "WHERE p.name = ?", + ([test_id, + checkpoint_id, + process_id, + iternum, + dp['val'], + dp['unit'], + dp['kind'], + name] + for name, dp in reports.iteritems() if dp)) + self.sqlite.commit() + self.info("Filled datapoint values in %.02fs" % + (time.time() - insertbegin)) + + # datapoints a list of the format [ [ "key", value, "meta"], ... ]. + # Duplicate keys are allowed. 
Value is numeric and required, meta is an + # optional string (see db format) + def add_test_results(self, testname, datapoints, succeeded=True): + # Ensure DB is open + if not self._open_db(): + return self.error("Failed to open sqlite database") + + if not testname: + return self.error("Invalid use of addDataPoint()") + + timestamp = time.time() + + if self.sqlite: + try: + cur = self.sqlite.cursor() + cur.execute("INSERT INTO " + " benchtester_tests(name, time, build_id, successful) " + "VALUES (?, ?, ?, ?)", + (testname, int(timestamp), self.build_id, succeeded)) + cur.execute("SELECT last_insert_rowid()") + + if datapoints: + testid = cur.fetchone()[0] + self.insert_results(testid, datapoints) + except Exception, e: + self.error( + "Failed to insert data into sqlite, got '%s': %s" % (type(e), e)) + import traceback + traceback.print_exc() + self.sqlite.rollback() + return False + return True + + def __init__(self, logfile=None, out=sys.stdout): + self.starttime = time.clock() + self.ready = False + self.args = {} + self.argparser = argparse.ArgumentParser( + description='Run automated benchmark suite, optionally adding \ + datapoints to a sqlite database') + self.arglist = {} + self.out = out + self.modules = {} + self.logfile = None + self.buildtime = None + self.buildname = None + self.sqlite = False + self.errors = [] + self.warnings = [] + + # Default to outputing 'mach' style to stdout. + log_args = {'log_mach': ['-']} + if logfile: + # If a logfile is requested we also output in a raw structured log + # format to the requested file. + log_args.update({'log_raw': [logfile]}) + + self.logger = commandline.setup_logging("AwsyTest", log_args) + + # These can be passed to setup() like so: + # mytester.setup({'binary': 'blah', 'buildname': 'blee'}) + # OR you can call mytester.parseArgs() on a command-line formatted arg + # list (sys.argv) to extract them as needed. + self.add_argument('-b', '--binary', + help='The binary (either in the current PATH or a full path) to test') + self.add_argument('--buildname', + help='The name of this firefox build. If omitted, attempts to use the \ + commit id from the mercurial respository the binary resides \ + in') + self.add_argument('--buildtime', + help='The unix timestamp to assign to this build \ + build. If omitted, attempts to use the commit timestamp \ + from the mercurial repository the binary resides in') + self.add_argument('--test-module', '-m', + help='Load the specified test module (from libs). You must load at \ + least one module to have tests', + action='append') + self.add_argument('-l', '--logfile', + help='Log to given file') + self.add_argument('-s', '--sqlitedb', + help='Merge datapoint into specified sqlite database') + + self.info("BenchTester instantiated") + + def add_argument(self, *args, **kwargs): + act = self.argparser.add_argument(*args, **kwargs) + if 'default' in kwargs: + self.args[act.dest] = kwargs['default'] + + # Parses commandline arguments, *AND* loads the modules specified on them, + # such that their arguments can be known/parsed. Does not prevent loading of + # more modules later on. + # - returns a args dict suitable for passing to setup(). + # - If handle_exceptions is false, will let argparser failures fall through. + # Otherwise, prints an error. 
+ # - The "test_module" argument is returned, but not used by setup, and is + # useful for seeing what modules the commandline just caused to load + def parse_args(self, rawargs=sys.argv[1:]): + self.info("Parsing arguments...") + try: + args = vars(self.argparser.parse_known_args(rawargs)[0]) + # Modules can add more arguments, so load the ones specified + # and re-parse + if args['test_module']: + for m in args['test_module']: + self.load_module(m) + args = vars(self.argparser.parse_args(rawargs)) + return args + except SystemExit, e: + return False + + def __del__(self): + # In case we exception out mid transaction or something + if (hasattr(self, 'sqlite') and self.sqlite): + self.sqlite.rollback() + + def _open_db(self): + if not self.args['sqlitedb'] or self.sqlite: + return True + + self.info("Setting up SQLite") + if not self.buildname or not self.buildtime: + self.error("Cannot use db without a buildname and buildtime set") + self.sqlitedb = self.args['sqlitedb'] = None + return False + try: + db_exists = os.path.exists(self.args['sqlitedb']) + + sql_path = os.path.abspath(self.args['sqlitedb']) + self.sqlite = sqlite3.connect(sql_path, timeout=900) + cur = self.sqlite.cursor() + + if db_exists: + # make sure the version matches + cur.execute( + "SELECT `version` FROM `benchtester_version` WHERE `version` = ?", [gVersion]) + row = cur.fetchone() + version = row[0] if row else None + if version != gVersion: + self.error("Incompatible versions: %s is version %s, current version is %s" % ( + self.args['sqlitedb'], version, gVersion)) + self.sqlitedb = self.args['sqlitedb'] = None + return False + + for schema in gTableSchemas: + cur.execute(schema) + + if not db_exists: + cur.execute( + "INSERT INTO `benchtester_version` (`version`) VALUES (?)", [gVersion]) + + # Create/update the repo + cur.execute( + "SELECT `id` FROM `benchtester_repos` WHERE `name` = ?", [self.repo]) + row = cur.fetchone() + if row: + repo_id = int(row[0]) + else: + cur.execute( + "INSERT INTO benchtester_repos(name) VALUES (?)", (self.repo, )) + repo_id = cur.lastrowid + + # Create/update build ID + cur.execute("SELECT `time`, `id` FROM `benchtester_builds` WHERE `name` = ?", [ + self.buildname]) + buildrow = cur.fetchone() + + if buildrow and buildrow[0] != int(self.buildtime): + self.warn("Build '%s' already exists in the database, but with a differing" + "timestamp. Overwriting old record (%s -> %s)" % + (self.buildname, buildrow[0], self.buildtime)) + cur.execute("UPDATE `benchtester_builds` SET `time` = ? 
WHERE `id` = ?", [ + int(self.buildtime), buildrow[1]]) + self.build_id = buildrow[1] + elif not buildrow: + self.info("Creating new build record") + cur.execute("INSERT INTO `benchtester_builds` (`name`, `time`, `repo_id`) " + "VALUES (?, ?, ?)", + (self.buildname, int(self.buildtime), repo_id)) + cur.execute("SELECT last_insert_rowid()") + self.build_id = cur.fetchone()[0] + else: + self.build_id = buildrow[1] + self.info("Found build record") + self.sqlite.commit() + except Exception, e: + self.error( + "Failed to setup sqliteDB '%s': %s - %s\n" % (self.args['sqlitedb'], type(e), e)) + self.sqlitedb = self.args['sqlitedb'] = None + return False + + return True + + def setup(self, args): + self.info("Performing setup") + self.hg_ui = mercurial.ui.ui() + + # args will already contain defaults from add_argument calls + self.args.update(args) + + # Check that binary is set + if not self.args['binary']: + return self.error("--binary is required, see --help") + try: + self.binary = os.path.abspath(self.args['binary']) + except: + self.binary = False + if not self.binary or not os.path.exists(self.binary): + return self.error("Unable to access binary '%s' (abs: '%s')\n" % + (self.args['binary'], + self.binary if self.binary else "Cannot resolve")) + + # Set commit name/timestamp + if (self.args['buildname']): + self.buildname = self.args['buildname'].strip() + if (self.args['buildtime']): + self.buildtime = str(self.args['buildtime']).strip() + + if 'repo' in self.args and self.args['repo']: + self.repo = self.args['repo'] + self.info('Using provided repo: %s' % self.repo) + else: + self.repo = 'mozilla-inbound' + self.info('Using default repo: mozilla-inbound') + + # Try to autodetect commitname/time if given a binary in a repo + if not self.buildname or not self.buildtime: + try: + hg_repo = mercurial.hg.repository( + self.hg_ui, os.path.dirname(self.binary)) + except: + hg_repo = None + if hg_repo: + try: + self.info( + "Binary is in a hg repo, attempting to detect build info") + self.hg_ui.pushbuffer() + mercurial.commands.tip( + self.hg_ui, hg_repo, template="{node} {date}") + tipinfo = self.hg_ui.popbuffer().split() + hg_changeset = tipinfo[0] + # Date is a float (truncate to int) of format + # 12345.0[+/-]3600 where 3600 is timezone info + hg_date = tipinfo[1].split('.')[0] + if not self.buildname: + self.buildname = hg_changeset + self.info( + "No build name given, using %s from repo binary is in" % self.buildname) + if not self.buildtime: + self.buildtime = hg_date + self.info( + "No build time given, using %s from repo binary is in" % self.buildtime) + except Exception as e: + self.error("Found a Hg repo, but failed to get changeset/timestamp. \ + You may need to provide these manually with --buildname, --buildtime\ + \nError was: %s" % (e)) + + # Sanity checks + if (self.sqlite): + if (not self.buildname or not len(self.buildname)): + self.error( + "Must provide a name for this build via --buildname in order to log to sqlite") + return False + + try: + inttime = int(self.buildtime, 10) + except: + inttime = None + if (not inttime or str(inttime) != self.buildtime or inttime < 1): + self.error( + "--buildtime must be set to a unix timestamp in order to log to sqlite") + return False + + self.failed_modules = {} + for m in self.modules: + if not self.modules[m].setup(): + self.error("Failed to setup module %s!" 
% m) + self.failed_modules[m] = self.modules[m] + for m in self.failed_modules: + del self.modules[m] + + self.ready = True + self.info("Setup successful") + return True diff --git a/benchtester/BuildGetter.py b/benchtester/BuildGetter.py index 6cbb48c..aa0b414 100644 --- a/benchtester/BuildGetter.py +++ b/benchtester/BuildGetter.py @@ -24,9 +24,9 @@ import mozdownload PUSHLOG_BRANCH_MAP = { - 'mozilla-inbound': 'integration/mozilla-inbound', - 'b2g-inbound': 'integration/b2g-inbound', - 'fx-team': 'integration/fx-team' + 'mozilla-inbound': 'integration/mozilla-inbound', + 'b2g-inbound': 'integration/b2g-inbound', + 'fx-team': 'integration/fx-team' } BASE_FTP_URL = 'https://archive.mozilla.org/pub' @@ -41,306 +41,317 @@ # This currently selects the linux-64 (non-pgo) build # hardcoded at a few spots. This will need to be changed for non-linux testing + def _stat(msg): - output.write("[BuildGetter] %s\n" % msg); + output.write("[BuildGetter] %s\n" % msg) def get_build_info(url): - """Retrieves the build info file and parses out relevant information""" - # cross-platform FIXME, this is hardcoded to linux - # trim off the extension, replace w/ .txt - info_url = url[:-len(".tar.bz2")] + ".txt" + """Retrieves the build info file and parses out relevant information""" + # cross-platform FIXME, this is hardcoded to linux + # trim off the extension, replace w/ .txt + info_url = url[:-len(".tar.bz2")] + ".txt" - try: - raw = urllib2.urlopen(info_url, timeout=30).read() - except (IOError, urllib2.URLError) as e: - _stat("ERR: Failed to query server for %s %s %s" % (url, type(e), e)) - return None + try: + raw = urllib2.urlopen(info_url, timeout=30).read() + except (IOError, urllib2.URLError) as e: + _stat("ERR: Failed to query server for %s %s %s" % (url, type(e), e)) + return None - _stat("Got build info: %s" % raw) + _stat("Got build info: %s" % raw) - # This file has had lines changed in the past, just find a numeric line - # and a url-of-revision-lookin' line - m = re.search('^[0-9]{14}$', raw, re.MULTILINE) - timestamp = int(time.mktime(time.strptime(m.group(0), '%Y%m%d%H%M%S'))) - m = re.search('^https?://hg.mozilla.org/(.+)/rev/([0-9a-z]+)$', raw, re.MULTILINE) - rev = m.group(2) - branch = m.group(1) + # This file has had lines changed in the past, just find a numeric line + # and a url-of-revision-lookin' line + m = re.search('^[0-9]{14}$', raw, re.MULTILINE) + timestamp = int(time.mktime(time.strptime(m.group(0), '%Y%m%d%H%M%S'))) + m = re.search( + '^https?://hg.mozilla.org/(.+)/rev/([0-9a-z]+)$', raw, re.MULTILINE) + rev = m.group(2) + branch = m.group(1) - return (timestamp, rev, branch) + return (timestamp, rev, branch) def pushlog_lookup(rev, branch=gDefaultBranch, base_url=BASE_HG_URL): - """hg.m.o pushlog query""" - pushlog_branch = PUSHLOG_BRANCH_MAP.get(branch, branch) - pushlog = gPushlogUrl % (base_url, pushlog_branch) - url = "%s?changeset=%s" % (pushlog, rev) - try: - raw = urllib2.urlopen(url, timeout=30).read() - except (IOError, urllib2.URLError) as e: - _stat("ERR: Failed to query pushlog for changeset %s on %s at %s: %s - %s" % (rev, branch, url, type(e), e)) - return False - try: - pushlog = json.loads(raw) - if len(pushlog) != 1: - raise ValueError("Pushlog returned %u items, expected 1" % len(pushlog)) - for cset in pushlog[pushlog.keys()[0]]['changesets']: - if cset.startswith(rev): - break - else: - raise ValueError("Pushlog returned a push that does not contain this revision?") - - except ValueError as e: - _stat("ERR: pushlog returned invalid JSON for changeset 
%s\n Error was:\n %s - %s\n JSON:\ %s" % (rev, type(e), e, raw)) - return False - - push = pushlog[pushlog.keys()[0]] - _stat("For rev %s on branch %s got push by %s at %u with %u changesets" % (cset, branch, push['user'], push['date'], len(push['changesets']))) - return cset, push['date'] + """hg.m.o pushlog query""" + pushlog_branch = PUSHLOG_BRANCH_MAP.get(branch, branch) + pushlog = gPushlogUrl % (base_url, pushlog_branch) + url = "%s?changeset=%s" % (pushlog, rev) + try: + raw = urllib2.urlopen(url, timeout=30).read() + except (IOError, urllib2.URLError) as e: + _stat("ERR: Failed to query pushlog for changeset %s on %s at %s: %s - %s" % + (rev, branch, url, type(e), e)) + return False + try: + pushlog = json.loads(raw) + if len(pushlog) != 1: + raise ValueError( + "Pushlog returned %u items, expected 1" % len(pushlog)) + for cset in pushlog[pushlog.keys()[0]]['changesets']: + if cset.startswith(rev): + break + else: + raise ValueError( + "Pushlog returned a push that does not contain this revision?") + + except ValueError as e: + _stat("ERR: pushlog returned invalid JSON for changeset %s\n" + " Error was:\n %s - %s\n JSON:\ %s" % + (rev, type(e), e, raw)) + return False + + push = pushlog[pushlog.keys()[0]] + _stat("For rev %s on branch %s got push by %s at %u with %u changesets" % + (cset, branch, push['user'], push['date'], len(push['changesets']))) + return cset, push['date'] def list_tinderbox_builds(starttime=0, endtime=int(time.time()), branch=gDefaultBranch, base_url=BASE_FTP_URL): - """ - Gets a list of TinderboxBuild objects for all builds on ftp.m.o within - specified date range. - """ - parser = mozdownload.parser.DirectoryParser(gTinderboxUrl % (base_url, branch)) - entries = parser.filter(r'^\d+$') # only entries that are all digits - return sorted([int(x) for x in entries if int(x) >= starttime and int(x) <= endtime]) + """ + Gets a list of TinderboxBuild objects for all builds on ftp.m.o within + specified date range. + """ + parser = mozdownload.parser.DirectoryParser( + gTinderboxUrl % (base_url, branch)) + entries = parser.filter(r'^\d+$') # only entries that are all digits + return sorted([int(x) for x in entries if int(x) >= starttime and int(x) <= endtime]) class Build(): - """Abstract base class for builds.""" + """Abstract base class for builds.""" - def prepare(self): - """Downloads or builds and extracts the build to a temporary directory""" - raise NotImplementedError() + def prepare(self): + """Downloads or builds and extracts the build to a temporary directory""" + raise NotImplementedError() - def cleanup(self): - raise NotImplementedError() + def cleanup(self): + raise NotImplementedError() - def get_revision(self): - raise NotImplementedError() + def get_revision(self): + raise NotImplementedError() - def get_buildtime(self): - raise NotImplementedError() + def get_buildtime(self): + raise NotImplementedError() - def get_valid(self): - raise NotImplementedError() + def get_valid(self): + raise NotImplementedError() - def get_binary(self): - """Requires prepare()'d""" - raise NotImplementedError() + def get_binary(self): + """Requires prepare()'d""" + raise NotImplementedError() class DownloadedBuild(Build): - """Base class with shared helpers for Tinderbox, Nightly, and Try builds""" + """Base class with shared helpers for Tinderbox, Nightly, and Try builds""" + + def __init__(self, scraper_args, directory=None, + base_ftp_url=BASE_FTP_URL, base_hg_url=BASE_HG_URL): + """ + Sets up the build for downloading. 
- def __init__(self, scraper_args, directory=None, - base_ftp_url=BASE_FTP_URL, base_hg_url=BASE_HG_URL): - """ - Sets up the build for downloading. + Creates a mozdownloader.scraper instance and then queries the server for + more build details such as revision, branch, and timestamp. - Creates a mozdownloader.scraper instance and then queries the server for - more build details such as revision, branch, and timestamp. + :param scraper_args: Specifies the |mozdownload.scraper| type to use and + arguments that should be passed to it. Format: + { 'type': , 'args': { ... } } + """ - :param scraper_args: Specifies the |mozdownload.scraper| type to use and - arguments that should be passed to it. Format: - { 'type': , 'args': { ... } } - """ + self._branch = None + self._extracted = directory + self._cleanup_dir = False + self._prepared = False + self._revision = None + self._scraper = None + self._scraperTarget = None + self._timestamp = None + self._valid = False + self._base_ftp_url = base_ftp_url + self._base_hg_url = base_hg_url - self._branch = None - self._extracted = directory - self._cleanup_dir = False - self._prepared = False - self._revision = None - self._scraper = None - self._scraperTarget = None - self._timestamp = None - self._valid = False - self._base_ftp_url = base_ftp_url - self._base_hg_url = base_hg_url - - if not directory: - self._extracted = tempfile.mkdtemp("BuildGetter_firefox") - self._cleanup_dir = True - - # FIXME: platform hard coded to linux64 - default_args = { - 'destination': self._extracted, - 'platform': 'linux64', - 'base_url': base_ftp_url, - } - - default_args.update(scraper_args['args']) - - # cache scraper details to support serialization - self._scraper_type = scraper_args['type'] - self._scraper_args = default_args + if not directory: + self._extracted = tempfile.mkdtemp("BuildGetter_firefox") + self._cleanup_dir = True - try: - self._scraper = scraper_args['type'](**default_args) - url = self._scraper.url - except mozdownload.errors.NotFoundError: - _stat("ERR: Build not found") - return - - ret = get_build_info(url) - if not ret: - _stat("ERR: Failed to lookup information about the build") - return - - (self._timestamp, self._revision, self._branch) = ret - - ret = pushlog_lookup(self._revision, self._branch, self._base_hg_url) - if not ret: - _stat("ERR: Failed to lookup the build in the pushlog") - return - - (self._revision, self._timestamp) = ret - - self._valid = True - - @staticmethod - def extract_build(src, dstdir): - """Extracts the given build to the given directory.""" - - # cross-platform FIXME, this is hardcoded to tar at the moment - with tarfile.open(src, mode='r:*') as tar: - tar.extractall(path=dstdir) - - def prepare(self): - """ - Prepares the build for testing. + # FIXME: platform hard coded to linux64 + default_args = { + 'destination': self._extracted, + 'platform': 'linux64', + 'base_url': base_ftp_url, + } - Downloads the build and extracts it to a temporary directory. 
- """ + default_args.update(scraper_args['args']) + + # cache scraper details to support serialization + self._scraper_type = scraper_args['type'] + self._scraper_args = default_args - if not self._scraper: - # recreate it - self._scraper = self._scraper_type(**self._scraper_args) + try: + self._scraper = scraper_args['type'](**default_args) + url = self._scraper.url + except mozdownload.errors.NotFoundError: + _stat("ERR: Build not found") + return - if not self._valid: - raise Exception("Attempted to prepare() invalid build") + ret = get_build_info(url) + if not ret: + _stat("ERR: Failed to lookup information about the build") + return - self._scraper.download() - self._scraperTarget = self._scraper.filename + (self._timestamp, self._revision, self._branch) = ret - _stat("Extracting build") - self.extract_build(self._scraper.filename, self._extracted) + ret = pushlog_lookup(self._revision, self._branch, self._base_hg_url) + if not ret: + _stat("ERR: Failed to lookup the build in the pushlog") + return - self._prepared = True - self._scraper = None - return True + (self._revision, self._timestamp) = ret - def cleanup(self): - if self._prepared: - self._prepared = False + self._valid = True - # remove the downloaded archive - os.remove(self._scraperTarget) + @staticmethod + def extract_build(src, dstdir): + """Extracts the given build to the given directory.""" - # remove the extracted archive - shutil.rmtree(os.path.join(self._extracted, "firefox")) + # cross-platform FIXME, this is hardcoded to tar at the moment + with tarfile.open(src, mode='r:*') as tar: + tar.extractall(path=dstdir) - # remove the temp directory that was created - if self._cleanup_dir: - shutil.rmtree(self._extracted) + def prepare(self): + """ + Prepares the build for testing. - return True + Downloads the build and extracts it to a temporary directory. 
+ """ - def get_revision(self): - return self._revision + if not self._scraper: + # recreate it + self._scraper = self._scraper_type(**self._scraper_args) - def get_valid(self): - return self._valid + if not self._valid: + raise Exception("Attempted to prepare() invalid build") - def get_binary(self): - if not self._prepared: - raise Exception("Build is not prepared") - # FIXME More hard-coded linux stuff - return os.path.join(self._extracted, "firefox", "firefox") + self._scraper.download() + self._scraperTarget = self._scraper.filename - def get_buildtime(self): - return self._timestamp + _stat("Extracting build") + self.extract_build(self._scraper.filename, self._extracted) + + self._prepared = True + self._scraper = None + return True + + def cleanup(self): + if self._prepared: + self._prepared = False + + # remove the downloaded archive + os.remove(self._scraperTarget) + + # remove the extracted archive + shutil.rmtree(os.path.join(self._extracted, "firefox")) + + # remove the temp directory that was created + if self._cleanup_dir: + shutil.rmtree(self._extracted) + + return True + + def get_revision(self): + return self._revision + + def get_valid(self): + return self._valid + + def get_binary(self): + if not self._prepared: + raise Exception("Build is not prepared") + # FIXME More hard-coded linux stuff + return os.path.join(self._extracted, "firefox", "firefox") + + def get_buildtime(self): + return self._timestamp class CompileBuild(Build): - """ - A build that needs to be compiled + """ + A build that needs to be compiled - This is currently unsupported, see: - https://github.com/mozilla/areweslimyet/issues/47 - """ - pass + This is currently unsupported, see: + https://github.com/mozilla/areweslimyet/issues/47 + """ + pass class FTPBuild(DownloadedBuild): - """A build that simply points to a full path on ftp.m.o""" + """A build that simply points to a full path on ftp.m.o""" - def __init__(self, path, *args, **kwargs): - self._path = path - scraper_info = { - 'type': mozdownload.DirectScraper, - 'args': { 'url': path } - } + def __init__(self, path, *args, **kwargs): + self._path = path + scraper_info = { + 'type': mozdownload.DirectScraper, + 'args': {'url': path} + } - DownloadedBuild.__init__(self, scraper_info, *args, **kwargs) + DownloadedBuild.__init__(self, scraper_info, *args, **kwargs) class TryBuild(DownloadedBuild): - """A try build from ftp.m.o. Initialized with a 12-digit try changeset.""" + """A try build from ftp.m.o. Initialized with a 12-digit try changeset.""" - def __init__(self, changeset, *args, **kwargs): - # mozdownload requires the full revision, look it up if necessary. - if len(changeset) != 40: - (changeset, _) = pushlog_lookup(changeset, branch='try', base_url=kwargs.get('base_hg_url', BASE_HG_URL)) + def __init__(self, changeset, *args, **kwargs): + # mozdownload requires the full revision, look it up if necessary. + if len(changeset) != 40: + (changeset, _) = pushlog_lookup(changeset, branch='try', + base_url=kwargs.get('base_hg_url', BASE_HG_URL)) - self._changeset = changeset - scraper_info = { - 'type': mozdownload.scraper.TryScraper, - 'args': { 'revision': changeset } - } + self._changeset = changeset + scraper_info = { + 'type': mozdownload.scraper.TryScraper, + 'args': {'revision': changeset} + } - DownloadedBuild.__init__(self, scraper_info, *args, **kwargs) + DownloadedBuild.__init__(self, scraper_info, *args, **kwargs) class NightlyBuild(DownloadedBuild): - """A nightly build. 
Initialized with a date() object or a YYYY-MM-DD string""" + """A nightly build. Initialized with a date() object or a YYYY-MM-DD string""" - def __init__(self, date, *args, **kwargs): - self._date = date if isinstance(date, datetime.date) else datetime.datetime.strptime(date, "%Y-%m-%d") - scraper_info = { - 'type': mozdownload.scraper.DailyScraper, - 'args': { 'date': self._date.strftime("%Y-%m-%d") } - } + def __init__(self, date, *args, **kwargs): + self._date = date if isinstance( + date, datetime.date) else datetime.datetime.strptime(date, "%Y-%m-%d") + scraper_info = { + 'type': mozdownload.scraper.DailyScraper, + 'args': {'date': self._date.strftime("%Y-%m-%d")} + } - DownloadedBuild.__init__(self, scraper_info, *args, **kwargs) + DownloadedBuild.__init__(self, scraper_info, *args, **kwargs) class TinderboxBuild(DownloadedBuild): - """A tinderbox build from ftp.m.o. Initialized with a timestamp to build""" + """A tinderbox build from ftp.m.o. Initialized with a timestamp to build""" - def __init__(self, timestamp, branch = "mozilla-inbound", *args, **kwargs): - if not branch: - branch = "mozilla-inbound" + def __init__(self, timestamp, branch="mozilla-inbound", *args, **kwargs): + if not branch: + branch = "mozilla-inbound" - self._branch_name = branch - self._tinderbox_timestamp = int(timestamp) + self._branch_name = branch + self._tinderbox_timestamp = int(timestamp) - # Use this as the timestamp if finding the build fails - self._timestamp = self._tinderbox_timestamp + # Use this as the timestamp if finding the build fails + self._timestamp = self._tinderbox_timestamp - scraper_info = { - 'type': mozdownload.scraper.TinderboxScraper, - 'args': { 'branch': branch, 'date': str(self._tinderbox_timestamp) } - } + scraper_info = { + 'type': mozdownload.scraper.TinderboxScraper, + 'args': {'branch': branch, 'date': str(self._tinderbox_timestamp)} + } - DownloadedBuild.__init__(self, scraper_info, *args, **kwargs) + DownloadedBuild.__init__(self, scraper_info, *args, **kwargs) - def get_tinderbox_timestamp(self): - return self._tinderbox_timestamp + def get_tinderbox_timestamp(self): + return self._tinderbox_timestamp - def get_branch(self): - return self._branch_name + def get_branch(self): + return self._branch_name diff --git a/benchtester/MarionetteTest.py b/benchtester/MarionetteTest.py index afb9d58..307a18b 100644 --- a/benchtester/MarionetteTest.py +++ b/benchtester/MarionetteTest.py @@ -15,130 +15,137 @@ from marionette.runtests import MarionetteTestRunner from mozlog.structured import commandline + class MarionetteTest(BenchTester.BenchTest): - def __init__(self, parent): - BenchTester.BenchTest.__init__(self, parent) - # NB: If bug 1027022 ever lands we can remove this. - parent.add_argument('--marionette_port', - help="Port to use for marionette, so concurrent tests don't collide", - default="24242") - parent.add_argument('--gecko_log', - help="Logfile for gecko output. 
Defaults to 'gecko.log'", - default=None) - parent.add_argument('--process_count', - help="Number of e10s processes to use", - default=1) - self.name = "MarionetteTest" - self.parent = parent - - def setup(self): - self.info("Setting up MarionetteTest module") - self.ready = True - self.endurance_results = None - self.port = int(self.parent.args['marionette_port']) - self.gecko_log = self.parent.args['gecko_log'] - self.process_count = int(self.parent.args['process_count']) - self.info("Process Count: %d " % self.process_count) - - return True - - def run_test(self, testname, testvars={}): - if not self.ready: - return self.error("run_test() called before setup") - - self.info("Beginning marionette test '%s'" % testname) - - e10s = testvars.get("e10s", False) - - prefs = { - # disable network access - "network.proxy.socks": "localhost", - "network.proxy.socks_port": testvars.get("proxyPort", 90000), - "network.proxy.socks_remote_dns": True, - "network.proxy.type": 1, # Socks - - # Don't open the first-run dialog, it loads a video - 'startup.homepage_welcome_url': '', - 'startup.homepage_override_url': '', - 'browser.newtab.url': 'about:blank', - - # make sure e10s is enabled - "browser.tabs.remote.autostart": e10s, - "browser.tabs.remote.autostart.1": e10s, - "browser.tabs.remote.autostart.2": e10s, - "browser.tabs.remote.autostart.3": e10s, - "browser.tabs.remote.autostart.4": e10s, - "browser.tabs.remote.autostart.5": e10s, - "browser.tabs.remote.autostart.6": e10s, - "dom.ipc.processCount": self.process_count, - - # prevent "You're using e10s!" dialog from showing up - "browser.displayedE10SNotice": 1000, - - # We're not testing flash memory usage. Also: it likes to crash in VNC sessions. - "plugin.disable": True, - - # Specify a communications port - "marionette.defaultPrefs.port": self.port, - - # override image expiration in hopes of getting less volatile numbers - "image.mem.surfacecache.min_expiration_ms": 10000 - } - - # Setup a test runner with our prefs and our logger. - # TODO(ER): We might want to use a larger set of "automation" preferences - # until marionette sets them for us. See bug 1123683. 
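# Aside on the prefs dict above: it "disables" network access by pointing a
# SOCKS proxy at an unusable localhost port, and it repeats the e10s
# autostart pref as .1 through .6 because the pref name appears to have
# been version-suffixed across Firefox releases. A hedged sketch of
# generating those repeated prefs instead of listing each one (illustrative
# helper, not part of this patch):
def e10s_autostart_prefs(enabled, max_suffix=6):
    prefs = {"browser.tabs.remote.autostart": enabled}
    for n in range(1, max_suffix + 1):
        prefs["browser.tabs.remote.autostart.%d" % n] = enabled
    return prefs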
- profile = mozprofile.FirefoxProfile(preferences=prefs) - - runner = MarionetteTestRunner( - binary=self.tester.binary, - profile=profile, - logger=self.parent.logger, - address="localhost:%d" % self.port, - gecko_log=self.gecko_log, - startup_timeout=60) - - # Add test - testpath = os.path.join(*testvars['test']) - if not os.path.exists(testpath): - return self.error("Test '%s' specifies a test that doesn't exist: %s" % (testname, testpath)) - - # Add our testvars - runner.testvars.update(testvars) - - # Run test - self.info("Marionette - starting browser") - try: - self.info("Marionette - running test") - runner.run_tests([ testpath ]) - failures = runner.failed - except Exception, e: - try: - runner.cleanup() - except: pass - return self.error("Marionette test run failed -- %s: %s" % (type(e), e)) - finally: - # cleanup the profile dir if not already cleaned up - if os.path.exists(profile.profile): - shutil.rmtree(profile.profile) - - self.info("Marionette - cleaning up") - try: - runner.cleanup() - except Exception, e: - self.error("Failed to properly cleanup marionette -- %s: %s" % (type(e), e)) - finally: - # cleanup the profile dir if not already cleaned up - if os.path.exists(profile.profile): - shutil.rmtree(profile.profile) - - self.info("Marionette - saving results") - - self.endurance_results = runner.testvars.get("results", []) - - if not self.tester.add_test_results(testname, self.endurance_results, not failures): - return self.error("Failed to save test results") - if failures: - return self.error("%u failures occured during test run" % failures) - self.info("Test '%s' complete" % testname) - return True + + def __init__(self, parent): + BenchTester.BenchTest.__init__(self, parent) + # NB: If bug 1027022 ever lands we can remove this. + parent.add_argument('--marionette_port', + help="Port to use for marionette, so concurrent tests don't collide", + default="24242") + parent.add_argument('--gecko_log', + help="Logfile for gecko output. 
Defaults to 'gecko.log'", + default=None) + parent.add_argument('--process_count', + help="Number of e10s processes to use", + default=1) + self.name = "MarionetteTest" + self.parent = parent + + def setup(self): + self.info("Setting up MarionetteTest module") + self.ready = True + self.endurance_results = None + self.port = int(self.parent.args['marionette_port']) + self.gecko_log = self.parent.args['gecko_log'] + self.process_count = int(self.parent.args['process_count']) + self.info("Process Count: %d " % self.process_count) + + return True + + def run_test(self, testname, testvars={}): + if not self.ready: + return self.error("run_test() called before setup") + + self.info("Beginning marionette test '%s'" % testname) + + e10s = testvars.get("e10s", False) + + prefs = { + # disable network access + "network.proxy.socks": "localhost", + "network.proxy.socks_port": testvars.get("proxyPort", 90000), + "network.proxy.socks_remote_dns": True, + "network.proxy.type": 1, # Socks + + # Don't open the first-run dialog, it loads a video + 'startup.homepage_welcome_url': '', + 'startup.homepage_override_url': '', + 'browser.newtab.url': 'about:blank', + + # make sure e10s is enabled + "browser.tabs.remote.autostart": e10s, + "browser.tabs.remote.autostart.1": e10s, + "browser.tabs.remote.autostart.2": e10s, + "browser.tabs.remote.autostart.3": e10s, + "browser.tabs.remote.autostart.4": e10s, + "browser.tabs.remote.autostart.5": e10s, + "browser.tabs.remote.autostart.6": e10s, + "dom.ipc.processCount": self.process_count, + + # prevent "You're using e10s!" dialog from showing up + "browser.displayedE10SNotice": 1000, + + # We're not testing flash memory usage. Also: it likes to crash in + # VNC sessions. + "plugin.disable": True, + + # Specify a communications port + "marionette.defaultPrefs.port": self.port, + + # override image expiration in hopes of getting less volatile + # numbers + "image.mem.surfacecache.min_expiration_ms": 10000 + } + + # Setup a test runner with our prefs and our logger. + # TODO(ER): We might want to use a larger set of "automation" preferences + # until marionette sets them for us. See bug 1123683. 
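# A minimal standalone sketch (not part of the patch) of what the
# FirefoxProfile call below does with the prefs dict: mozprofile writes the
# preferences into user.js inside a throwaway profile directory, and the
# Marionette runner launches Firefox with that profile. The pref value here
# is illustrative.
import mozprofile

demo_profile = mozprofile.FirefoxProfile(preferences={"dom.ipc.processCount": 1})
print(demo_profile.profile)  # path of the generated profile directory
demo_profile.cleanup()       # remove the temporary profile directory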
+ profile = mozprofile.FirefoxProfile(preferences=prefs) + + runner = MarionetteTestRunner( + binary=self.tester.binary, + profile=profile, + logger=self.parent.logger, + address="localhost:%d" % self.port, + gecko_log=self.gecko_log, + startup_timeout=60) + + # Add test + testpath = os.path.join(*testvars['test']) + if not os.path.exists(testpath): + return self.error("Test '%s' specifies a test that doesn't exist: %s" % + (testname, testpath)) + + # Add our testvars + runner.testvars.update(testvars) + + # Run test + self.info("Marionette - starting browser") + try: + self.info("Marionette - running test") + runner.run_tests([testpath]) + failures = runner.failed + except Exception, e: + try: + runner.cleanup() + except: + pass + return self.error("Marionette test run failed -- %s: %s" % (type(e), e)) + finally: + # cleanup the profile dir if not already cleaned up + if os.path.exists(profile.profile): + shutil.rmtree(profile.profile) + + self.info("Marionette - cleaning up") + try: + runner.cleanup() + except Exception, e: + self.error( + "Failed to properly cleanup marionette -- %s: %s" % (type(e), e)) + finally: + # cleanup the profile dir if not already cleaned up + if os.path.exists(profile.profile): + shutil.rmtree(profile.profile) + + self.info("Marionette - saving results") + + self.endurance_results = runner.testvars.get("results", []) + + if not self.tester.add_test_results(testname, self.endurance_results, not failures): + return self.error("Failed to save test results") + if failures: + return self.error("%u failures occured during test run" % failures) + self.info("Test '%s' complete" % testname) + return True diff --git a/benchtester/test_memory_usage.py b/benchtester/test_memory_usage.py index 192947b..b01281c 100644 --- a/benchtester/test_memory_usage.py +++ b/benchtester/test_memory_usage.py @@ -14,107 +14,107 @@ # Talos TP5 TEST_SITES = [ - "http://localhost:8001/tp5/thesartorialist.blogspot.com/thesartorialist.blogspot.com/index.html", - "http://localhost:8002/tp5/cakewrecks.blogspot.com/cakewrecks.blogspot.com/index.html", - "http://localhost:8003/tp5/baidu.com/www.baidu.com/s@wd=mozilla.html", - "http://localhost:8004/tp5/en.wikipedia.org/en.wikipedia.org/wiki/Rorschach_test.html", - "http://localhost:8005/tp5/twitter.com/twitter.com/ICHCheezburger.html", - "http://localhost:8006/tp5/msn.com/www.msn.com/index.html", - "http://localhost:8007/tp5/yahoo.co.jp/www.yahoo.co.jp/index.html", - "http://localhost:8008/tp5/amazon.com/www.amazon.com/Kindle-Wireless-Reader-Wifi-Graphite/dp/B002Y27P3M/507846.html", - "http://localhost:8009/tp5/linkedin.com/www.linkedin.com/in/christopherblizzard@goback=.nppvan_%252Flemuelf.html", - "http://localhost:8010/tp5/bing.com/www.bing.com/search@q=mozilla&go=&form=QBLH&qs=n&sk=&sc=8-0.html", - "http://localhost:8011/tp5/icanhascheezburger.com/icanhascheezburger.com/index.html", - "http://localhost:8012/tp5/yandex.ru/yandex.ru/yandsearch@text=mozilla&lr=21215.html", - "http://localhost:8013/tp5/cgi.ebay.com/cgi.ebay.com/ALL-NEW-KINDLE-3-eBOOK-WIRELESS-READING-DEVICE-W-WIFI-/130496077314@pt=LH_DefaultDomain_0&hash=item1e622c1e02.html", - "http://localhost:8014/tp5/163.com/www.163.com/index.html", - "http://localhost:8015/tp5/mail.ru/mail.ru/index.html", - "http://localhost:8016/tp5/bbc.co.uk/www.bbc.co.uk/news/index.html", - "http://localhost:8017/tp5/store.apple.com/store.apple.com/us@mco=Nzc1MjMwNA.html", - "http://localhost:8018/tp5/imdb.com/www.imdb.com/title/tt1099212/index.html", - 
"http://localhost:8019/tp5/mozilla.com/www.mozilla.com/en-US/firefox/all-older.html", - "http://localhost:8020/tp5/ask.com/www.ask.com/web@q=What%27s+the+difference+between+brown+and+white+eggs%253F&gc=1&qsrc=3045&o=0&l=dir.html", - "http://localhost:8021/tp5/cnn.com/www.cnn.com/index.html", - "http://localhost:8022/tp5/sohu.com/www.sohu.com/index.html", - "http://localhost:8023/tp5/vkontakte.ru/vkontakte.ru/help.php@page=about.html", - "http://localhost:8024/tp5/youku.com/www.youku.com/index.html", - "http://localhost:8025/tp5/myparentswereawesome.tumblr.com/myparentswereawesome.tumblr.com/index.html", - "http://localhost:8026/tp5/ifeng.com/ifeng.com/index.html", - "http://localhost:8027/tp5/ameblo.jp/ameblo.jp/index.html", - "http://localhost:8028/tp5/tudou.com/www.tudou.com/index.html", - "http://localhost:8029/tp5/chemistry.about.com/chemistry.about.com/index.html", - "http://localhost:8030/tp5/beatonna.livejournal.com/beatonna.livejournal.com/index.html", - "http://localhost:8031/tp5/hao123.com/hao123.com/index.html", - "http://localhost:8032/tp5/rakuten.co.jp/www.rakuten.co.jp/index.html", - "http://localhost:8033/tp5/alibaba.com/www.alibaba.com/product-tp/101509462/World_s_Cheapest_Laptop.html", - "http://localhost:8034/tp5/uol.com.br/www.uol.com.br/index.html", - "http://localhost:8035/tp5/cnet.com/www.cnet.com/index.html", - "http://localhost:8036/tp5/ehow.com/www.ehow.com/how_4575878_prevent-fire-home.html", - "http://localhost:8037/tp5/thepiratebay.org/thepiratebay.org/top/201.html", - "http://localhost:8038/tp5/page.renren.com/page.renren.com/index.html", - "http://localhost:8039/tp5/chinaz.com/chinaz.com/index.html", - "http://localhost:8040/tp5/globo.com/www.globo.com/index.html", - "http://localhost:8041/tp5/spiegel.de/www.spiegel.de/index.html", - "http://localhost:8042/tp5/dailymotion.com/www.dailymotion.com/us.html", - "http://localhost:8043/tp5/goo.ne.jp/goo.ne.jp/index.html", - "http://localhost:8044/tp5/alipay.com/www.alipay.com/index.html", - "http://localhost:8045/tp5/stackoverflow.com/stackoverflow.com/questions/184618/what-is-the-best-comment-in-source-code-you-have-ever-encountered.html", - "http://localhost:8046/tp5/nicovideo.jp/www.nicovideo.jp/index.html", - "http://localhost:8047/tp5/ezinearticles.com/ezinearticles.com/index.html@Migraine-Ocular---The-Eye-Migraines&id=4684133.html", - "http://localhost:8048/tp5/taringa.net/www.taringa.net/index.html", - "http://localhost:8049/tp5/tmall.com/www.tmall.com/index.html@ver=2010s.html", - "http://localhost:8050/tp5/huffingtonpost.com/www.huffingtonpost.com/index.html", - "http://localhost:8051/tp5/deviantart.com/www.deviantart.com/index.html", - "http://localhost:8052/tp5/media.photobucket.com/media.photobucket.com/image/funny%20gif/findstuff22/Best%20Images/Funny/funny-gif1.jpg@o=1.html", - "http://localhost:8053/tp5/douban.com/www.douban.com/index.html", - "http://localhost:8054/tp5/imgur.com/imgur.com/gallery/index.html", - "http://localhost:8055/tp5/reddit.com/www.reddit.com/index.html", - "http://localhost:8056/tp5/digg.com/digg.com/news/story/New_logo_for_Mozilla_Firefox_browser.html", - "http://localhost:8057/tp5/filestube.com/www.filestube.com/t/the+vampire+diaries.html", - "http://localhost:8058/tp5/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html", - "http://localhost:8059/tp5/whois.domaintools.com/whois.domaintools.com/mozilla.com.html", - "http://localhost:8060/tp5/indiatimes.com/www.indiatimes.com/index.html", - "http://localhost:8061/tp5/rambler.ru/www.rambler.ru/index.html", - 
"http://localhost:8062/tp5/torrentz.eu/torrentz.eu/search@q=movies.html", - "http://localhost:8063/tp5/reuters.com/www.reuters.com/index.html", - "http://localhost:8064/tp5/foxnews.com/www.foxnews.com/index.html", - "http://localhost:8065/tp5/xinhuanet.com/xinhuanet.com/index.html", - "http://localhost:8066/tp5/56.com/www.56.com/index.html", - "http://localhost:8067/tp5/bild.de/www.bild.de/index.html", - "http://localhost:8068/tp5/guardian.co.uk/www.guardian.co.uk/index.html", - "http://localhost:8069/tp5/w3schools.com/www.w3schools.com/html/default.asp.html", - "http://localhost:8070/tp5/naver.com/www.naver.com/index.html", - "http://localhost:8071/tp5/blogfa.com/blogfa.com/index.html", - "http://localhost:8072/tp5/terra.com.br/www.terra.com.br/portal/index.html", - "http://localhost:8073/tp5/ucoz.ru/www.ucoz.ru/index.html", - "http://localhost:8074/tp5/yelp.com/www.yelp.com/biz/alexanders-steakhouse-cupertino.html", - "http://localhost:8075/tp5/wsj.com/online.wsj.com/home-page.html", - "http://localhost:8076/tp5/noimpactman.typepad.com/noimpactman.typepad.com/index.html", - "http://localhost:8077/tp5/myspace.com/www.myspace.com/albumart.html", - "http://localhost:8078/tp5/google.com/www.google.com/search@q=mozilla.html", - "http://localhost:8079/tp5/orange.fr/www.orange.fr/index.html", - "http://localhost:8080/tp5/php.net/php.net/index.html", - "http://localhost:8081/tp5/zol.com.cn/www.zol.com.cn/index.html", - "http://localhost:8082/tp5/mashable.com/mashable.com/index.html", - "http://localhost:8083/tp5/etsy.com/www.etsy.com/category/geekery/videogame.html", - "http://localhost:8084/tp5/gmx.net/www.gmx.net/index.html", - "http://localhost:8085/tp5/csdn.net/csdn.net/index.html", - "http://localhost:8086/tp5/xunlei.com/xunlei.com/index.html", - "http://localhost:8087/tp5/hatena.ne.jp/www.hatena.ne.jp/index.html", - "http://localhost:8088/tp5/icious.com/www.delicious.com/index.html", - "http://localhost:8089/tp5/repubblica.it/www.repubblica.it/index.html", - "http://localhost:8090/tp5/web.de/web.de/index.html", - "http://localhost:8091/tp5/slideshare.net/www.slideshare.net/jameswillamor/lolcats-in-popular-culture-a-historical-perspective.html", - "http://localhost:8092/tp5/telegraph.co.uk/www.telegraph.co.uk/index.html", - "http://localhost:8093/tp5/seesaa.net/blog.seesaa.jp/index.html", - "http://localhost:8094/tp5/wp.pl/www.wp.pl/index.html", - "http://localhost:8095/tp5/aljazeera.net/aljazeera.net/portal.html", - "http://localhost:8096/tp5/w3.org/www.w3.org/standards/webdesign/htmlcss.html", - "http://localhost:8097/tp5/homeway.com.cn/www.hexun.com/index.html", - "http://localhost:8098/tp5/facebook.com/www.facebook.com/Google.html", - "http://localhost:8099/tp5/youtube.com/www.youtube.com/music.html", - "http://localhost:8100/tp5/people.com.cn/people.com.cn/index.html" -]; + "http://localhost:8001/tp5/thesartorialist.blogspot.com/thesartorialist.blogspot.com/index.html", + "http://localhost:8002/tp5/cakewrecks.blogspot.com/cakewrecks.blogspot.com/index.html", + "http://localhost:8003/tp5/baidu.com/www.baidu.com/s@wd=mozilla.html", + "http://localhost:8004/tp5/en.wikipedia.org/en.wikipedia.org/wiki/Rorschach_test.html", + "http://localhost:8005/tp5/twitter.com/twitter.com/ICHCheezburger.html", + "http://localhost:8006/tp5/msn.com/www.msn.com/index.html", + "http://localhost:8007/tp5/yahoo.co.jp/www.yahoo.co.jp/index.html", + "http://localhost:8008/tp5/amazon.com/www.amazon.com/Kindle-Wireless-Reader-Wifi-Graphite/dp/B002Y27P3M/507846.html", + 
"http://localhost:8009/tp5/linkedin.com/www.linkedin.com/in/christopherblizzard@goback=.nppvan_%252Flemuelf.html", + "http://localhost:8010/tp5/bing.com/www.bing.com/search@q=mozilla&go=&form=QBLH&qs=n&sk=&sc=8-0.html", + "http://localhost:8011/tp5/icanhascheezburger.com/icanhascheezburger.com/index.html", + "http://localhost:8012/tp5/yandex.ru/yandex.ru/yandsearch@text=mozilla&lr=21215.html", + "http://localhost:8013/tp5/cgi.ebay.com/cgi.ebay.com/ALL-NEW-KINDLE-3-eBOOK-WIRELESS-READING-DEVICE-W-WIFI-/130496077314@pt=LH_DefaultDomain_0&hash=item1e622c1e02.html", + "http://localhost:8014/tp5/163.com/www.163.com/index.html", + "http://localhost:8015/tp5/mail.ru/mail.ru/index.html", + "http://localhost:8016/tp5/bbc.co.uk/www.bbc.co.uk/news/index.html", + "http://localhost:8017/tp5/store.apple.com/store.apple.com/us@mco=Nzc1MjMwNA.html", + "http://localhost:8018/tp5/imdb.com/www.imdb.com/title/tt1099212/index.html", + "http://localhost:8019/tp5/mozilla.com/www.mozilla.com/en-US/firefox/all-older.html", + "http://localhost:8020/tp5/ask.com/www.ask.com/web@q=What%27s+the+difference+between+brown+and+white+eggs%253F&gc=1&qsrc=3045&o=0&l=dir.html", + "http://localhost:8021/tp5/cnn.com/www.cnn.com/index.html", + "http://localhost:8022/tp5/sohu.com/www.sohu.com/index.html", + "http://localhost:8023/tp5/vkontakte.ru/vkontakte.ru/help.php@page=about.html", + "http://localhost:8024/tp5/youku.com/www.youku.com/index.html", + "http://localhost:8025/tp5/myparentswereawesome.tumblr.com/myparentswereawesome.tumblr.com/index.html", + "http://localhost:8026/tp5/ifeng.com/ifeng.com/index.html", + "http://localhost:8027/tp5/ameblo.jp/ameblo.jp/index.html", + "http://localhost:8028/tp5/tudou.com/www.tudou.com/index.html", + "http://localhost:8029/tp5/chemistry.about.com/chemistry.about.com/index.html", + "http://localhost:8030/tp5/beatonna.livejournal.com/beatonna.livejournal.com/index.html", + "http://localhost:8031/tp5/hao123.com/hao123.com/index.html", + "http://localhost:8032/tp5/rakuten.co.jp/www.rakuten.co.jp/index.html", + "http://localhost:8033/tp5/alibaba.com/www.alibaba.com/product-tp/101509462/World_s_Cheapest_Laptop.html", + "http://localhost:8034/tp5/uol.com.br/www.uol.com.br/index.html", + "http://localhost:8035/tp5/cnet.com/www.cnet.com/index.html", + "http://localhost:8036/tp5/ehow.com/www.ehow.com/how_4575878_prevent-fire-home.html", + "http://localhost:8037/tp5/thepiratebay.org/thepiratebay.org/top/201.html", + "http://localhost:8038/tp5/page.renren.com/page.renren.com/index.html", + "http://localhost:8039/tp5/chinaz.com/chinaz.com/index.html", + "http://localhost:8040/tp5/globo.com/www.globo.com/index.html", + "http://localhost:8041/tp5/spiegel.de/www.spiegel.de/index.html", + "http://localhost:8042/tp5/dailymotion.com/www.dailymotion.com/us.html", + "http://localhost:8043/tp5/goo.ne.jp/goo.ne.jp/index.html", + "http://localhost:8044/tp5/alipay.com/www.alipay.com/index.html", + "http://localhost:8045/tp5/stackoverflow.com/stackoverflow.com/questions/184618/what-is-the-best-comment-in-source-code-you-have-ever-encountered.html", + "http://localhost:8046/tp5/nicovideo.jp/www.nicovideo.jp/index.html", + "http://localhost:8047/tp5/ezinearticles.com/ezinearticles.com/index.html@Migraine-Ocular---The-Eye-Migraines&id=4684133.html", + "http://localhost:8048/tp5/taringa.net/www.taringa.net/index.html", + "http://localhost:8049/tp5/tmall.com/www.tmall.com/index.html@ver=2010s.html", + "http://localhost:8050/tp5/huffingtonpost.com/www.huffingtonpost.com/index.html", + 
"http://localhost:8051/tp5/deviantart.com/www.deviantart.com/index.html", + "http://localhost:8052/tp5/media.photobucket.com/media.photobucket.com/image/funny%20gif/findstuff22/Best%20Images/Funny/funny-gif1.jpg@o=1.html", + "http://localhost:8053/tp5/douban.com/www.douban.com/index.html", + "http://localhost:8054/tp5/imgur.com/imgur.com/gallery/index.html", + "http://localhost:8055/tp5/reddit.com/www.reddit.com/index.html", + "http://localhost:8056/tp5/digg.com/digg.com/news/story/New_logo_for_Mozilla_Firefox_browser.html", + "http://localhost:8057/tp5/filestube.com/www.filestube.com/t/the+vampire+diaries.html", + "http://localhost:8058/tp5/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html", + "http://localhost:8059/tp5/whois.domaintools.com/whois.domaintools.com/mozilla.com.html", + "http://localhost:8060/tp5/indiatimes.com/www.indiatimes.com/index.html", + "http://localhost:8061/tp5/rambler.ru/www.rambler.ru/index.html", + "http://localhost:8062/tp5/torrentz.eu/torrentz.eu/search@q=movies.html", + "http://localhost:8063/tp5/reuters.com/www.reuters.com/index.html", + "http://localhost:8064/tp5/foxnews.com/www.foxnews.com/index.html", + "http://localhost:8065/tp5/xinhuanet.com/xinhuanet.com/index.html", + "http://localhost:8066/tp5/56.com/www.56.com/index.html", + "http://localhost:8067/tp5/bild.de/www.bild.de/index.html", + "http://localhost:8068/tp5/guardian.co.uk/www.guardian.co.uk/index.html", + "http://localhost:8069/tp5/w3schools.com/www.w3schools.com/html/default.asp.html", + "http://localhost:8070/tp5/naver.com/www.naver.com/index.html", + "http://localhost:8071/tp5/blogfa.com/blogfa.com/index.html", + "http://localhost:8072/tp5/terra.com.br/www.terra.com.br/portal/index.html", + "http://localhost:8073/tp5/ucoz.ru/www.ucoz.ru/index.html", + "http://localhost:8074/tp5/yelp.com/www.yelp.com/biz/alexanders-steakhouse-cupertino.html", + "http://localhost:8075/tp5/wsj.com/online.wsj.com/home-page.html", + "http://localhost:8076/tp5/noimpactman.typepad.com/noimpactman.typepad.com/index.html", + "http://localhost:8077/tp5/myspace.com/www.myspace.com/albumart.html", + "http://localhost:8078/tp5/google.com/www.google.com/search@q=mozilla.html", + "http://localhost:8079/tp5/orange.fr/www.orange.fr/index.html", + "http://localhost:8080/tp5/php.net/php.net/index.html", + "http://localhost:8081/tp5/zol.com.cn/www.zol.com.cn/index.html", + "http://localhost:8082/tp5/mashable.com/mashable.com/index.html", + "http://localhost:8083/tp5/etsy.com/www.etsy.com/category/geekery/videogame.html", + "http://localhost:8084/tp5/gmx.net/www.gmx.net/index.html", + "http://localhost:8085/tp5/csdn.net/csdn.net/index.html", + "http://localhost:8086/tp5/xunlei.com/xunlei.com/index.html", + "http://localhost:8087/tp5/hatena.ne.jp/www.hatena.ne.jp/index.html", + "http://localhost:8088/tp5/icious.com/www.delicious.com/index.html", + "http://localhost:8089/tp5/repubblica.it/www.repubblica.it/index.html", + "http://localhost:8090/tp5/web.de/web.de/index.html", + "http://localhost:8091/tp5/slideshare.net/www.slideshare.net/jameswillamor/lolcats-in-popular-culture-a-historical-perspective.html", + "http://localhost:8092/tp5/telegraph.co.uk/www.telegraph.co.uk/index.html", + "http://localhost:8093/tp5/seesaa.net/blog.seesaa.jp/index.html", + "http://localhost:8094/tp5/wp.pl/www.wp.pl/index.html", + "http://localhost:8095/tp5/aljazeera.net/aljazeera.net/portal.html", + "http://localhost:8096/tp5/w3.org/www.w3.org/standards/webdesign/htmlcss.html", + "http://localhost:8097/tp5/homeway.com.cn/www.hexun.com/index.html", 
+ "http://localhost:8098/tp5/facebook.com/www.facebook.com/Google.html", + "http://localhost:8099/tp5/youtube.com/www.youtube.com/music.html", + "http://localhost:8100/tp5/people.com.cn/people.com.cn/index.html" +] # Maximum number of tabs to open MAX_TABS = 30 @@ -128,6 +128,7 @@ # Amount of times to run through the test suite ITERATIONS = 5 + class TestMemoryUsage(MarionetteTestCase): """Provides a test that collects memory usage at various checkpoints: - "Start" - Just after startup @@ -139,6 +140,7 @@ class TestMemoryUsage(MarionetteTestCase): - "TabsClosedSettled" - After an additional wait time - "TabsClosedForceGC" - After forcibly invoking garbage collection """ + def setUp(self): MarionetteTestCase.setUp(self) @@ -149,13 +151,15 @@ def setUp(self): self._pages_to_load = self.testvars.get("entities", len(self._urls)) self._iterations = self.testvars.get("iterations", ITERATIONS) self._perTabPause = self.testvars.get("perTabPause", PER_TAB_PAUSE) - self._settleWaitTime = self.testvars.get("settleWaitTime", SETTLE_WAIT_TIME) + self._settleWaitTime = self.testvars.get( + "settleWaitTime", SETTLE_WAIT_TIME) self._maxTabs = self.testvars.get("maxTabs", MAX_TABS) # workaround for https://bugzilla.mozilla.org/show_bug.cgi?id=1128773 - js = os.path.abspath(os.path.join(__file__, os.path.pardir, "checkpoint.js")) + js = os.path.abspath(os.path.join( + __file__, os.path.pardir, "checkpoint.js")) with open(js) as f: - self._checkpoint_script = f.read() + self._checkpoint_script = f.read() self.reset_state() @@ -183,11 +187,11 @@ def do_full_gc(self): """ # NB: we could do this w/ a signal or the fifo queue too self.logger.info("starting gc...") - gc_script = """ + gc_script = """ const Cu = Components.utils; const Cc = Components.classes; const Ci = Components.interfaces; - + Cu.import("resource://gre/modules/Services.jsm"); Services.obs.notifyObservers(null, "child-mmu-request", null); @@ -196,15 +200,16 @@ def do_full_gc(self): """ result = None try: - result = self.marionette.execute_async_script(gc_script, script_timeout=180000); + result = self.marionette.execute_async_script( + gc_script, script_timeout=180000) except JavascriptException, e: - self.logger.error("GC JavaScript error: %s" % e) + self.logger.error("GC JavaScript error: %s" % e) except ScriptTimeoutException: - self.logger.error("GC timed out") + self.logger.error("GC timed out") except: - self.logger.error("Unexpected error: %s" % sys.exc_info()[0]) + self.logger.error("Unexpected error: %s" % sys.exc_info()[0]) else: - self.logger.info(result) + self.logger.info(result) return result is not None @@ -225,15 +230,16 @@ def do_memory_report(self, checkpointName): checkpoint = None try: - checkpoint = self.marionette.execute_async_script(script, script_timeout=60000) + checkpoint = self.marionette.execute_async_script( + script, script_timeout=60000) except JavascriptException, e: - self.logger.error("Checkpoint JavaScript error: %s" % e) + self.logger.error("Checkpoint JavaScript error: %s" % e) except ScriptTimeoutException: - self.logger.error("Memory report timed out") + self.logger.error("Memory report timed out") except: - self.logger.error("Unexpected error: %s" % sys.exc_info()[0]) + self.logger.error("Unexpected error: %s" % sys.exc_info()[0]) else: - self.logger.info("checkpoint created!") + self.logger.info("checkpoint created!") return checkpoint @@ -250,13 +256,15 @@ def open_and_focus(self): if tabs_loaded < self._maxTabs and tabs_loaded <= self._pages_loaded: full_tab_list = self.marionette.window_handles - # 
Trigger opening a new tab by finding the new tab button and clicking it + # Trigger opening a new tab by finding the new tab button and + # clicking it newtab_button = (self.marionette.find_element('id', 'tabbrowser-tabs') .find_element('anon attribute', {'anonid': 'tabs-newtab-button'})) newtab_button.click() - self.wait_for_condition(lambda mn: len(mn.window_handles) == tabs_loaded + 1) + self.wait_for_condition(lambda mn: len( + mn.window_handles) == tabs_loaded + 1) # NB: The tab list isn't sorted, so we do a set diff to determine # which is the new tab @@ -276,9 +284,9 @@ def open_and_focus(self): # NB: As a work-around for an e10s marionette bug, only select the tab # if we're really switching tabs. if tabs_loaded > 1: - self.logger.debug("switching to tab") - self.marionette.switch_to_window(tab) - self.logger.debug("switched to tab") + self.logger.debug("switching to tab") + self.marionette.switch_to_window(tab) + self.logger.debug("switched to tab") with self.marionette.using_context('content'): self.logger.info("loading %s" % page_to_load) @@ -287,13 +295,14 @@ def open_and_focus(self): # On e10s the tab handle can change after actually loading content if is_new_tab: - # First build a set up w/o the current tab - old_tabs = set(self._tabs) - old_tabs.remove(tab) - # Perform a set diff to get the (possibly) new handle - [new_tab] = set(self.marionette.window_handles) - old_tabs - # Update the tab list at the current index to preserve the tab ordering - self._tabs[tab_idx] = new_tab + # First build a set up w/o the current tab + old_tabs = set(self._tabs) + old_tabs.remove(tab) + # Perform a set diff to get the (possibly) new handle + [new_tab] = set(self.marionette.window_handles) - old_tabs + # Update the tab list at the current index to preserve the tab + # ordering + self._tabs[tab_idx] = new_tab # give the page time to settle time.sleep(self._perTabPause) @@ -331,14 +340,15 @@ def test_open_tabs(self): |testvars| object it passed in. """ # setup the results array - results = [ [] for x in range(self._iterations) ] + results = [[] for x in range(self._iterations)] def create_checkpoint(name, iteration): - checkpoint = self.do_memory_report(name) - self.assertIsNotNone(checkpoint, "Checkpoint was recorded") - results[iteration].append(checkpoint) + checkpoint = self.do_memory_report(name) + self.assertIsNotNone(checkpoint, "Checkpoint was recorded") + results[iteration].append(checkpoint) - # The first iteration gets Start and StartSettled entries before opening tabs + # The first iteration gets Start and StartSettled entries before + # opening tabs create_checkpoint("Start", 0) time.sleep(self._settleWaitTime) create_checkpoint("StartSettled", 0) @@ -375,4 +385,3 @@ def create_checkpoint(name, iteration): # TODO(ER): Temporary hack until bug 1121139 lands self.logger.info("setting results") self.testvars["results"] = results - diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..6bb09a2 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,8 @@ +[pylama] +linters = pep8 + +[pylama:benchtester/test_memory_usage.py] +ignore = E501 + +[pylama:pep8] +max_line_length = 100 diff --git a/tox.ini b/tox.ini index bb1ad21..80d76af 100644 --- a/tox.ini +++ b/tox.ini @@ -6,6 +6,9 @@ deps= mock mozfile mozhttpd + pylama pytest -commands=py.test tests +commands= + pylama benchtester + py.test tests
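
Reviewer notes (appended for context; not part of the patch):

The setUp() hunk reads every tunable through self.testvars.get() with a
module-level constant as the fallback, so a run can be resized without code
changes. The sketch below lists the keys setUp() recognizes, written as a
Python dict; the perTabPause and settleWaitTime values are illustrative
placeholders, not the module defaults.

    # Keys consumed by setUp() via self.testvars.get(key, default).
    # Omit any key to fall back to the module-level constant.
    testvars = {
        "entities": 100,       # pages to load; defaults to len(self._urls)
        "iterations": 5,       # passes over the suite; default ITERATIONS (5)
        "perTabPause": 10,     # placeholder value; default PER_TAB_PAUSE
        "settleWaitTime": 30,  # placeholder value; default SETTLE_WAIT_TIME
        "maxTabs": 30,         # open-tab cap; default MAX_TABS (30)
    }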
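
The open_and_focus() hunks rely on the invariant the patch comments call out:
window_handles is unordered, so the only reliable way to identify a newly
opened (or e10s-reassigned) tab handle is a set difference between the handle
list before and after the event. A minimal standalone sketch of that trick;
the helper name is ours, not the patch's:

    def find_new_handle(before, after):
        """Return the single handle present in `after` but absent from `before`.

        The destructuring assignment deliberately raises ValueError if zero
        or several new handles appeared, mirroring the patch's
        `[new_tab] = set(self.marionette.window_handles) - old_tabs` line.
        """
        [new] = set(after) - set(before)
        return new

    # e.g.: new_tab = find_new_handle(old_handles, marionette.window_handles)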
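
Likewise, the new-tab click is guarded by self.wait_for_condition(lambda mn:
len(mn.window_handles) == tabs_loaded + 1). That helper is provided by the
test harness rather than defined in this hunk; behaviorally it amounts to a
poll-until-timeout loop along these lines (a sketch under that assumption,
with made-up timeout and interval values; the harness passes the marionette
object to the predicate, which is why the patch's lambda takes mn):

    import time

    def wait_for(predicate, timeout=30.0, interval=0.1):
        # Poll until predicate() returns a truthy value or `timeout`
        # seconds elapse; raise if the condition is never met.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(interval)
        raise AssertionError("condition not met within %.1fs" % timeout)

    # e.g.: wait_for(lambda: len(marionette.window_handles) == tabs_loaded + 1)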