From d6870db981da78a55f5cae204a835cba5af88ff5 Mon Sep 17 00:00:00 2001
From: "Dustin J. Mitchell"
Date: Thu, 21 Jul 2011 14:55:44 -0700
Subject: [PATCH 1/2] Terminate EC2 instances instead of stopping them.

This will correctly delete the instance and associated storage, rather
than leaving that storage allocated but unused as previous versions did.

Fixes #1931.
---
 master/NEWS                      | 5 +++++
 master/buildbot/ec2buildslave.py | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/master/NEWS b/master/NEWS
index b33ce080521..0d0f1cc1e3d 100644
--- a/master/NEWS
+++ b/master/NEWS
@@ -5,6 +5,11 @@ Major User visible changes in Buildbot.  -*- outline -*-
 
 * Next Version
 
+** EC2 instances are now terminated instead of stopped.
+
+This is really only relevant for EBS-backed instances, as Buildbot will now
+free the instance and associated EBS storage when shutting down the slave.
+
 ** SQLite databases use write-ahead logging
 
 WAL mode offers much greater concurrency (preventing the dreaded 'database is
diff --git a/master/buildbot/ec2buildslave.py b/master/buildbot/ec2buildslave.py
index 511c2977ec3..e899c4053e7 100644
--- a/master/buildbot/ec2buildslave.py
+++ b/master/buildbot/ec2buildslave.py
@@ -295,7 +295,7 @@ def _stop_instance(self, instance, fast):
             self.conn.disassociate_address(self.elastic_ip.public_ip)
         instance.update()
         if instance.state not in (SHUTTINGDOWN, TERMINATED):
-            instance.stop()
+            instance.terminate()
             log.msg('%s %s terminating instance %s' %
                     (self.__class__.__name__, self.slavename, instance.id))
         duration = 0

From b31aecd6832e77f07dcdb00767c41350db119cbb Mon Sep 17 00:00:00 2001
From: "Dustin J. Mitchell"
Date: Thu, 21 Jul 2011 17:45:30 -0700
Subject: [PATCH 2/2] Tests for buildbot.status.logfile.LogFile (incomplete)

This fixes some bugs found by the tests, and also removes code intended
to support upgrades from versions older than 0.6.5
---
 master/NEWS                                    |   3 +
 master/buildbot/status/build.py                |  15 -
 master/buildbot/status/builder.py              |   2 -
 master/buildbot/status/logfile.py              | 272 +++++++++++-------
 .../buildbot/test/unit/test_status_logfile.py  | 272 +++++++++++++++++-
 5 files changed, 449 insertions(+), 115 deletions(-)

diff --git a/master/NEWS b/master/NEWS
index 0d0f1cc1e3d..d0c9bbcf6c6 100644
--- a/master/NEWS
+++ b/master/NEWS
@@ -23,6 +23,9 @@ longer tracks master instances, so the unclaimOldIncarnationRequests method
 has been removed.  Note that several of the methods in this module now perform
 fewer consistency checks, for efficiency.
 
+*** Upgrades directly from versions older than 0.6.5 will no longer
+automatically migrate logfiles.
+
 ** Customizable validation regexps
 
 The global c['validation'] parameter can be used to adjust the regular
diff --git a/master/buildbot/status/build.py b/master/buildbot/status/build.py
index 7fde741fae8..2ff9f907705 100644
--- a/master/buildbot/status/build.py
+++ b/master/buildbot/status/build.py
@@ -384,21 +384,6 @@ def upgradeToVersion3(self):
         self.properties.update(propdict, "Upgrade from previous version")
         self.wasUpgraded = True
 
-    def upgradeLogfiles(self):
-        # upgrade any LogFiles that need it. This must occur after we've been
-        # attached to our Builder, and after we know about all LogFiles of
-        # all Steps (to get the filenames right).
-        assert self.builder
-        for s in self.steps:
-            for l in s.getLogs():
-                if l.filename:
-                    pass # new-style, log contents are on disk
-                else:
-                    logfilename = self.generateLogfileName(s.name, l.name)
-                    # let the logfile update its .filename pointer,
-                    # transferring its contents onto disk if necessary
-                    l.upgrade(logfilename)
-
     def checkLogfiles(self):
         # check that all logfiles exist, and remove references to any that
         # have been deleted (e.g., by purge())
diff --git a/master/buildbot/status/builder.py b/master/buildbot/status/builder.py
index beae9247298..6ecc121f2f9 100644
--- a/master/buildbot/status/builder.py
+++ b/master/buildbot/status/builder.py
@@ -230,8 +230,6 @@ def getBuildByNumber(self, number):
                     log.msg("re-writing upgraded build pickle")
                     build.saveYourself()
 
-                # handle LogFiles from after 0.5.0 and before 0.6.5
-                build.upgradeLogfiles()
                 # check that logfiles exist
                 build.checkLogfiles()
                 return self.touchBuildCache(build)
diff --git a/master/buildbot/status/logfile.py b/master/buildbot/status/logfile.py
index f34cfbe9ce0..7d039d1ba14 100644
--- a/master/buildbot/status/logfile.py
+++ b/master/buildbot/status/logfile.py
@@ -175,17 +175,16 @@ def logfileFinished(self, logfile):
         self.consumer = None
 
 class LogFile:
-    """A LogFile keeps all of its contents on disk, in a non-pickle format to
-    which new entries can easily be appended. The file on disk has a name
-    like 12-log-compile-output, under the Builder's directory. The actual
-    filename is generated (before the LogFile is created) by
+    """
+    A LogFile keeps all of its contents on disk, in a non-pickle format to
+    which new entries can easily be appended. The file on disk has a name like
+    12-log-compile-output, under the Builder's directory. The actual filename
+    is generated (before the LogFile is created) by
     L{BuildStatus.generateLogfileName}.
 
-    Old LogFile pickles (which kept their contents in .entries) must be
-    upgraded. The L{BuilderStatus} is responsible for doing this, when it
-    loads the L{BuildStatus} into memory. The Build pickle is not modified,
-    so users who go from 0.6.5 back to 0.6.4 don't have to lose their
-    logs."""
+    @ivar length: length of the data in the logfile (sum of chunk sizes; not
+        the length of the on-disk encoding)
+    """
 
     implements(interfaces.IStatusLog, interfaces.ILogFile)
 
@@ -236,22 +235,57 @@ def __init__(self, parent, name, logfilename):
         self.tailBuffer = []
 
     def getFilename(self):
+        """
+        Get the base (uncompressed) filename for this log file.
+
+        @returns: filename
+        """
         return os.path.join(self.step.build.builder.basedir, self.filename)
 
     def hasContents(self):
+        """
+        Return true if this logfile's contents are available. For a newly
+        created logfile, this is always true, but for a L{LogFile} instance
+        that has been persisted, the logfiles themselves may have been deleted,
+        in which case this method will return False.
+
+        @returns: boolean
+        """
         return os.path.exists(self.getFilename() + '.bz2') or \
             os.path.exists(self.getFilename() + '.gz') or \
             os.path.exists(self.getFilename())
 
     def getName(self):
+        """
+        Get this logfile's name
+
+        @returns: string
+        """
         return self.name
 
     def getStep(self):
+        """
+        Get the L{BuildStepStatus} instance containing this logfile
+
+        @returns: L{BuildStepStatus} instance
+        """
         return self.step
 
     def isFinished(self):
+        """
+        Return true if this logfile is finished (that is, if it will not
+        receive any additional data)
+
+        @returns: boolean
+        """
+        return self.finished
+
     def waitUntilFinished(self):
+        """
+        Return a Deferred that will fire when this logfile is finished, or will
+        fire immediately if the logfile is already finished.
+        """
         if self.finished:
             d = defer.succeed(self)
         else:
@@ -260,6 +294,14 @@ def waitUntilFinished(self):
         return d
 
     def getFile(self):
+        """
+        Get an open file object for this log. The file may also be in use for
+        writing, so it should not be closed by the caller, and the caller
+        should not rely on its file position remaining constant between
+        asynchronous code segments.
+
+        @returns: file object
+        """
         if self.openfile:
             # this is the filehandle we're using to write to the log, so
             # don't close it!
@@ -381,14 +423,14 @@ def subscribeConsumer(self, consumer):
 
     # interface used by the build steps to add things to the log
 
-    def merge(self):
+    def _merge(self):
         # merge all .runEntries (which are all of the same type) into a
         # single chunk for .entries
         if not self.runEntries:
             return
         channel = self.runEntries[0][0]
         text = "".join([c[1] for c in self.runEntries])
-        assert channel < 10
+        assert channel < 10, "channel number must be a single decimal digit"
         f = self.openfile
         f.seek(0, 2)
         offset = 0
@@ -401,75 +443,122 @@ def merge(self):
         self.runEntries = []
         self.runLength = 0
 
-    def addEntry(self, channel, text):
-        assert not self.finished
+    def addEntry(self, channel, text, _no_watchers=False):
+        """
+        Add an entry to the logfile. The C{channel} is one of L{STDOUT},
+        L{STDERR}, or L{HEADER}. The C{text} is the text to add to the
+        logfile, which can be a unicode string or a bytestring which is
+        presumed to be encoded with utf-8.
+
+        This method cannot be called after the logfile is finished.
+
+        @param channel: channel to add a chunk for
+        @param text: chunk of text
+        @param _no_watchers: private
+        """
+
+        assert not self.finished, "logfile is already finished"
         if isinstance(text, unicode):
             text = text.encode('utf-8')
+
+        # notify watchers first, before the chunk gets munged, so that they get
+        # a complete picture of the actual log output
+        # TODO: is this right, or should the watchers get a picture of the chunks?
+        if not _no_watchers:
+            for w in self.watchers:
+                w.logChunk(self.step.build, self.step, self, channel, text)
+
         if channel != HEADER:
             # Truncate the log if it's more than logMaxSize bytes
-            if self.logMaxSize and self.nonHeaderLength > self.logMaxSize:
-                # Add a message about what's going on
-                if not self.maxLengthExceeded:
-                    msg = "\nOutput exceeded %i bytes, remaining output has been truncated\n" % self.logMaxSize
-                    self.addEntry(HEADER, msg)
-                    self.merge()
-                    self.maxLengthExceeded = True
-
-                if self.logMaxTailSize:
-                    # Update the tail buffer
-                    self.tailBuffer.append((channel, text))
-                    self.tailLength += len(text)
-                    while self.tailLength > self.logMaxTailSize:
-                        # Drop some stuff off the beginning of the buffer
-                        c,t = self.tailBuffer.pop(0)
-                        n = len(t)
-                        self.tailLength -= n
-                        assert self.tailLength >= 0
-                return
-
-            self.nonHeaderLength += len(text)
-
-        # we only add to .runEntries here. merge() is responsible for adding
+            if self.logMaxSize:
+                self.nonHeaderLength += len(text)
+                if self.nonHeaderLength > self.logMaxSize:
+                    # Add a message about what's going on and truncate this
+                    # chunk if necessary
+                    if not self.maxLengthExceeded:
+                        if self.runEntries and channel != self.runEntries[0][0]:
+                            self._merge()
+                        i = -(self.nonHeaderLength - self.logMaxSize)
+                        trunc, text = text[:i], text[i:]
+                        self.runEntries.append((channel, trunc))
+                        self._merge()
+                        msg = ("\nOutput exceeded %i bytes, remaining output "
+                               "has been truncated\n" % self.logMaxSize)
+                        self.runEntries.append((HEADER, msg))
+                        self.maxLengthExceeded = True
+
+                    # and track the tail of the text
+                    if self.logMaxTailSize and text:
+                        # Update the tail buffer
+                        self.tailBuffer.append((channel, text))
+                        self.tailLength += len(text)
+                        while self.tailLength > self.logMaxTailSize:
+                            # Drop some stuff off the beginning of the buffer
+                            c,t = self.tailBuffer.pop(0)
+                            n = len(t)
+                            self.tailLength -= n
+                            assert self.tailLength >= 0
+                    return
+
+        # we only add to .runEntries here. _merge() is responsible for adding
         # merged chunks to .entries
         if self.runEntries and channel != self.runEntries[0][0]:
-            self.merge()
+            self._merge()
         self.runEntries.append((channel, text))
         self.runLength += len(text)
         if self.runLength >= self.chunkSize:
-            self.merge()
+            self._merge()
 
-        for w in self.watchers:
-            w.logChunk(self.step.build, self.step, self, channel, text)
         self.length += len(text)
 
     def addStdout(self, text):
+        """
+        Shortcut to add stdout text to the logfile
+
+        @param text: text to add to the logfile
+        """
         self.addEntry(STDOUT, text)
+
     def addStderr(self, text):
+        """
+        Shortcut to add stderr text to the logfile
+
+        @param text: text to add to the logfile
+        """
         self.addEntry(STDERR, text)
+
     def addHeader(self, text):
+        """
+        Shortcut to add header text to the logfile
+
+        @param text: text to add to the logfile
+        """
        self.addEntry(HEADER, text)
 
     def finish(self):
+        """
+        Finish the logfile, flushing any buffers and preventing any further
+        writes to the log.
+        """
+        self._merge()
         if self.tailBuffer:
             msg = "\nFinal %i bytes follow below:\n" % self.tailLength
             tmp = self.runEntries
             self.runEntries = [(HEADER, msg)]
-            self.merge()
+            self._merge()
             self.runEntries = self.tailBuffer
-            self.merge()
+            self._merge()
             self.runEntries = tmp
-            self.merge()
+            self._merge()
             self.tailBuffer = []
-        else:
-            self.merge()
 
         if self.openfile:
             # we don't do an explicit close, because there might be readers
             # shareing the filehandle. As soon as they stop reading, the
             # filehandle will be released and automatically closed.
             self.openfile.flush()
-            del self.openfile
+            self.openfile = None
         self.finished = True
         watchers = self.finishedWatchers
         self.finishedWatchers = []
@@ -484,43 +573,48 @@ def compressLog(self):
             compressed = self.getFilename() + ".bz2.tmp"
         elif self.compressMethod == "gz":
             compressed = self.getFilename() + ".gz.tmp"
-        d = threads.deferToThread(self._compressLog, compressed)
-        d.addCallback(self._renameCompressedLog, compressed)
-        d.addErrback(self._cleanupFailedCompress, compressed)
+        else:
+            return defer.succeed(None)
+
+        def _compressLog():
+            infile = self.getFile()
+            if self.compressMethod == "bz2":
+                cf = BZ2File(compressed, 'w')
+            elif self.compressMethod == "gz":
+                cf = GzipFile(compressed, 'w')
+            bufsize = 1024*1024
+            while True:
+                buf = infile.read(bufsize)
+                cf.write(buf)
+                if len(buf) < bufsize:
+                    break
+            cf.close()
+        d = threads.deferToThread(_compressLog)
+
+        def _renameCompressedLog(rv):
+            if self.compressMethod == "bz2":
+                filename = self.getFilename() + '.bz2'
+            else:
+                filename = self.getFilename() + '.gz'
+            if runtime.platformType == 'win32':
+                # windows cannot rename a file on top of an existing one, so
+                # fall back to delete-first. There are ways this can fail and
+                # lose the builder's history, so we avoid using it in the
+                # general (non-windows) case
+                if os.path.exists(filename):
+                    os.unlink(filename)
+            os.rename(compressed, filename)
+            _tryremove(self.getFilename(), 1, 5)
+        d.addCallback(_renameCompressedLog)
+
+        def _cleanupFailedCompress(failure):
+            log.msg("failed to compress %s" % self.getFilename())
+            if os.path.exists(compressed):
+                _tryremove(compressed, 1, 5)
+            failure.trap() # reraise the failure
+        d.addErrback(_cleanupFailedCompress)
         return d
 
-    def _compressLog(self, compressed):
-        infile = self.getFile()
-        if self.compressMethod == "bz2":
-            cf = BZ2File(compressed, 'w')
-        elif self.compressMethod == "gz":
-            cf = GzipFile(compressed, 'w')
-        bufsize = 1024*1024
-        while True:
-            buf = infile.read(bufsize)
-            cf.write(buf)
-            if len(buf) < bufsize:
-                break
-        cf.close()
-    def _renameCompressedLog(self, rv, compressed):
-        if self.compressMethod == "bz2":
-            filename = self.getFilename() + '.bz2'
-        else:
-            filename = self.getFilename() + '.gz'
-        if runtime.platformType == 'win32':
-            # windows cannot rename a file on top of an existing one, so
-            # fall back to delete-first. There are ways this can fail and
-            # lose the builder's history, so we avoid using it in the
-            # general (non-windows) case
-            if os.path.exists(filename):
-                os.unlink(filename)
-        os.rename(compressed, filename)
-        _tryremove(self.getFilename(), 1, 5)
-    def _cleanupFailedCompress(self, failure, compressed):
-        log.msg("failed to compress %s" % self.getFilename())
-        if os.path.exists(compressed):
-            _tryremove(compressed, 1, 5)
-        failure.trap() # reraise the failure
 
     # persistence stuff
     def __getstate__(self):
@@ -542,19 +636,6 @@ def __setstate__(self, d):
         # self.step must be filled in by our parent
         self.finished = True
 
-    def upgrade(self, logfilename):
-        """Save our .entries to a new-style offline log file (if necessary),
-        and modify our in-memory representation to use it. The original
-        pickled LogFile (inside the pickled Build) won't be modified."""
-        self.filename = logfilename
-        if not os.path.exists(self.getFilename()):
-            self.openfile = open(self.getFilename(), "w")
-            self.finished = False
-            for channel,text in self.entries:
-                self.addEntry(channel, text)
-            self.finish() # releases self.openfile, which will be closed
-        del self.entries
-
 class HTMLLogFile:
     implements(interfaces.IStatusLog)
@@ -598,9 +679,6 @@ def __getstate__(self):
         del d['step']
         return d
 
-    def upgrade(self, logfilename):
-        pass
-
 
 def _tryremove(filename, timeout, retries):
     """Try to remove a file, and if failed, try again in timeout.
diff --git a/master/buildbot/test/unit/test_status_logfile.py b/master/buildbot/test/unit/test_status_logfile.py
index 2ef6ba2cca2..9bbab1c45c5 100644
--- a/master/buildbot/test/unit/test_status_logfile.py
+++ b/master/buildbot/test/unit/test_status_logfile.py
@@ -13,11 +13,13 @@
 #
 # Copyright Buildbot Team Members
 
+import os
+import cStringIO, cPickle
 import mock
-import cStringIO
 from twisted.trial import unittest
 from twisted.internet import defer
 from buildbot.status import logfile
+from buildbot.test.util import dirs
 
 class TestLogFileProducer(unittest.TestCase):
     def make_static_logfile(self, contents):
@@ -42,3 +44,271 @@ def test_getChunks_static_multichannel(self):
 
     # Remainder of LogFileProduer has a wacky interface that's not
     # well-defined, so it's not tested yet
+
+class TestLogFile(unittest.TestCase, dirs.DirsMixin):
+
+    def setUp(self):
+        step = self.build_step_status = mock.Mock(name='build_step_status')
+        self.basedir = step.build.builder.basedir = os.path.abspath('basedir')
+        self.setUpDirs(self.basedir)
+        self.logfile = logfile.LogFile(step, 'testlf', '123-stdio')
+
+    def tearDown(self):
+        self.tearDownDirs()
+
+    def pickle_and_restore(self):
+        pkl = cPickle.dumps(self.logfile)
+        self.logfile = cPickle.loads(pkl)
+        step = self.build_step_status
+        self.logfile.step = step
+        step.build.builder.basedir = self.basedir
+
+    # tests
+
+    def test_getFilename(self):
+        self.assertEqual(self.logfile.getFilename(),
+                os.path.abspath(os.path.join('basedir', '123-stdio')))
+
+    def test_hasContents_yes(self):
+        self.assertTrue(self.logfile.hasContents())
+
+    def test_hasContents_no(self):
+        os.unlink(os.path.join('basedir', '123-stdio')) # created by constructor
+        self.assertFalse(self.logfile.hasContents())
+
+    def test_hasContents_gz(self):
+        os.unlink(os.path.join('basedir', '123-stdio')) # created by constructor
+        open(os.path.join(self.basedir, '123-stdio.gz'), "w").write("hi")
+        self.assertTrue(self.logfile.hasContents())
+
+    def test_hasContents_gz_pickled(self):
+        os.unlink(os.path.join('basedir', '123-stdio')) # created by constructor
+        open(os.path.join(self.basedir, '123-stdio.gz'), "w").write("hi")
+        self.pickle_and_restore()
+        self.assertTrue(self.logfile.hasContents())
+
+    def test_hasContents_bz2(self):
+        os.unlink(os.path.join('basedir', '123-stdio')) # created by constructor
+        open(os.path.join(self.basedir, '123-stdio.bz2'), "w").write("hi")
+        self.assertTrue(self.logfile.hasContents())
+
+    def test_getName(self):
+        self.assertEqual(self.logfile.getName(), 'testlf')
+
+    def test_getStep(self):
+        self.assertEqual(self.logfile.getStep(), self.build_step_status)
+
+    def test_isFinished_no(self):
+        self.assertFalse(self.logfile.isFinished())
+
+    def test_isFinished_yes(self):
+        self.logfile.finish()
+        self.assertTrue(self.logfile.isFinished())
+
+    def test_waitUntilFinished(self):
+        state = []
+        d = self.logfile.waitUntilFinished()
+        d.addCallback(lambda _ : state.append('called'))
+        self.assertEqual(state, []) # not called yet
+        self.logfile.finish()
+        self.assertEqual(state, ['called'])
+
+    def test_getFile(self):
+        # test getFile at a number of points in the life-cycle
+        self.logfile.addEntry(0, 'hello, world')
+        self.logfile._merge()
+
+        # while still open for writing
+        fp = self.logfile.getFile()
+        fp.seek(0, 0)
+        self.assertEqual(fp.read(), '13:0hello, world,')
+
+        self.logfile.finish()
+
+        # fp is still open after finish()
+        fp.seek(0, 0)
+        self.assertEqual(fp.read(), '13:0hello, world,')
+
+        # but a fresh getFile call works, too
+        fp = self.logfile.getFile()
+        fp.seek(0, 0)
+        self.assertEqual(fp.read(), '13:0hello, world,')
+
+        self.pickle_and_restore()
+
+        # even after it is pickled
+        fp = self.logfile.getFile()
+        fp.seek(0, 0)
+        self.assertEqual(fp.read(), '13:0hello, world,')
+
+        # ..and compressed
+        self.logfile.compressMethod = 'bz2'
+        d = self.logfile.compressLog()
+        def check(_):
+            self.assertTrue(
+                os.path.exists(os.path.join(self.basedir, '123-stdio.bz2')))
+            fp = self.logfile.getFile()
+            fp.seek(0, 0)
+            self.assertEqual(fp.read(), '13:0hello, world,')
+        d.addCallback(check)
+        return d
+
+    def do_test_addEntry(self, entries, expected):
+        for chan, txt in entries:
+            self.logfile.addEntry(chan, txt)
+        self.logfile.finish()
+        fp = self.logfile.getFile()
+        fp.seek(0, 0)
+        self.assertEqual(fp.read(), expected)
+
+    def test_addEntry_single(self):
+        return self.do_test_addEntry([(0, 'hello, world')],
+                '13:0hello, world,')
+
+    def test_addEntry_run(self):
+        # test that addEntry is calling _merge() correctly
+        return self.do_test_addEntry([ (0, c) for c in 'hello, world' ],
+                '13:0hello, world,')
+
+    def test_addEntry_multichan(self):
+        return self.do_test_addEntry([(1, 'x'), (2, 'y'), (1, 'z')],
+                '2:1x,2:2y,2:1z,')
+
+    def test_addEntry_length(self):
+        self.do_test_addEntry([(1, 'x'), (2, 'y')],
+                '2:1x,2:2y,')
+        self.assertEqual(self.logfile.length, 2)
+
+    def test_addEntry_unicode(self):
+        return self.do_test_addEntry([(1, u'\N{SNOWMAN}')],
+                '4:1\xe2\x98\x83,') # utf-8 encoded
+
+    def test_addEntry_logMaxSize(self):
+        self.logfile.logMaxSize = 10 # not evenly divisible by chunk size
+        return self.do_test_addEntry([(0, 'abcdef')] * 10 ,
+                '11:0abcdefabcd,'
+                '64:2\nOutput exceeded 10 bytes, remaining output has been '
+                'truncated\n,')
+
+    def test_addEntry_logMaxSize_ignores_header(self):
+        self.logfile.logMaxSize = 10
+        return self.do_test_addEntry([(logfile.HEADER, 'abcdef')] * 10 ,
+                '61:2' + 'abcdef'*10 + ',')
+
+    def test_addEntry_logMaxSize_divisor(self):
+        self.logfile.logMaxSize = 12 # evenly divisible by chunk size
+        return self.do_test_addEntry([(0, 'abcdef')] * 10 ,
+                '13:0abcdefabcdef,'
+                '64:2\nOutput exceeded 12 bytes, remaining output has been '
+                'truncated\n,')
+
+    def test_addEntry_logMaxTailSize(self):
+        self.logfile.logMaxSize = 10
+        self.logfile.logMaxTailSize = 14
+        return self.do_test_addEntry([(0, 'abcdef')] * 10 ,
+                '11:0abcdefabcd,'
+                '64:2\nOutput exceeded 10 bytes, remaining output has been '
+                'truncated\n,'
+                # NOTE: this gets too few bytes; this is OK for now, and
+                # easier than subdividing chunks in the tail tracking
+                '31:2\nFinal 12 bytes follow below:\n,'
+                '13:0abcdefabcdef,')
+
+    def test_addEntry_logMaxTailSize_divisor(self):
+        self.logfile.logMaxSize = 10
+        self.logfile.logMaxTailSize = 12
+        return self.do_test_addEntry([(0, 'abcdef')] * 10 ,
+                '11:0abcdefabcd,'
+                '64:2\nOutput exceeded 10 bytes, remaining output has been '
+                'truncated\n,'
+                '31:2\nFinal 12 bytes follow below:\n,'
+                '13:0abcdefabcdef,')
+
+    # TODO: test that head and tail don't discriminate between stderr and stdout
+
+    def test_addEntry_chunkSize(self):
+        self.logfile.chunkSize = 11
+        return self.do_test_addEntry([(0, 'abcdef')] * 10 ,
+                # note that this doesn't re-chunk everything; just shrinks
+                # chunks that will exceed the maximum size
+                '12:0abcdefabcde,2:0f,' * 5)
+
+    def test_addEntry_big_channel(self):
+        # channels larger than one digit are not allowed
+        self.assertRaises(AssertionError,
+                lambda : self.do_test_addEntry([(9999, 'x')], ''))
+
+    def test_addEntry_finished(self):
+        self.logfile.finish()
+        self.assertRaises(AssertionError,
+                lambda : self.do_test_addEntry([(0, 'x')], ''))
+
+    def test_addEntry_merge_exception(self):
+        def fail():
+            raise RuntimeError("FAIL")
+        self.patch(self.logfile, '_merge', fail)
+        self.assertRaises(RuntimeError,
+                lambda : self.do_test_addEntry([(0, 'x')], ''))
+
+    def test_addEntry_watchers(self):
+        watcher = mock.Mock(name='watcher')
+        self.logfile.watchers.append(watcher)
+        self.do_test_addEntry([(0, 'x')], '2:0x,')
+        watcher.logChunk.assert_called_with(self.build_step_status.build,
+                self.build_step_status, self.logfile, 0, 'x')
+
+    def test_addEntry_watchers_logMaxSize(self):
+        watcher = mock.Mock(name='watcher')
+        self.logfile.watchers.append(watcher)
+        self.logfile.logMaxSize = 10
+        self.do_test_addEntry([(0, 'x')] * 15,
+                '11:0xxxxxxxxxx,'
+                '64:2\nOutput exceeded 10 bytes, remaining output has been '
+                'truncated\n,')
+        logChunk_chunks = [ tuple(args[0][3:])
+                            for args in watcher.logChunk.call_args_list ]
+        self.assertEqual(logChunk_chunks, [(0, 'x')] * 15)
+
+    def test_addStdout(self):
+        addEntry = mock.Mock()
+        self.patch(self.logfile, 'addEntry', addEntry)
+        self.logfile.addStdout('oot')
+        addEntry.assert_called_with(0, 'oot')
+
+    def test_addStderr(self):
+        addEntry = mock.Mock()
+        self.patch(self.logfile, 'addEntry', addEntry)
+        self.logfile.addStderr('eer')
+        addEntry.assert_called_with(1, 'eer')
+
+    def test_addHeader(self):
+        addEntry = mock.Mock()
+        self.patch(self.logfile, 'addEntry', addEntry)
+        self.logfile.addHeader('hed')
+        addEntry.assert_called_with(2, 'hed')
+
+    def do_test_compressLog(self, ext, expect_comp=True):
+        self.logfile.openfile.write('xyz' * 1000)
+        self.logfile.finish()
+        d = self.logfile.compressLog()
+        def check(_):
+            st = os.stat(self.logfile.getFilename() + ext)
+            if expect_comp:
+                self.assertTrue(0 < st.st_size < 3000)
+            else:
+                self.assertTrue(st.st_size == 3000)
+        d.addCallback(check)
+        return d
+
+    def test_compressLog_gz(self):
+        self.logfile.compressMethod = 'gz'
+        return self.do_test_compressLog('.gz')
+
+    def test_compressLog_bz2(self):
+        self.logfile.compressMethod = 'bz2'
+        return self.do_test_compressLog('.bz2')
+
+    def test_compressLog_none(self):
+        self.logfile.compressMethod = None
+        return self.do_test_compressLog('', expect_comp=False)
+
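A note on patch 1: the whole fix is the difference between boto's stop() and
terminate() on an EBS-backed instance. The sketch below is illustration only,
not code from the patches; the instance id is hypothetical and credentials
are assumed to come from the environment.

    # Illustration of the stop/terminate distinction patch 1 relies on.
    import boto

    conn = boto.connect_ec2()
    reservation = conn.get_all_instances(instance_ids=['i-12345678'])[0]
    instance = reservation.instances[0]

    # Before: instance.stop() halts an EBS-backed instance but leaves the
    # instance record and its EBS volumes allocated (and the storage billed).
    # After: terminate() destroys the instance, and EBS volumes with the
    # delete-on-termination flag set (the default for root volumes) are
    # freed along with it.
    instance.terminate()
    instance.update()     # refresh instance.state from the EC2 API
    assert instance.state in ('shutting-down', 'terminated')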
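A note on patch 2: the new tests pin down the on-disk encoding that _merge()
writes. Each run of same-channel text becomes a netstring-like record,
"<length>:<channel><text>,", where <length> counts the single channel digit
plus the text, so '13:0hello, world,' is twelve bytes of stdout. A minimal
standalone reader, for illustration only (parse_chunks is a hypothetical
helper, not part of the patches):

    def parse_chunks(data):
        # Decode "<length>:<channel><text>," records into (channel, text) pairs.
        chunks = []
        while data:
            length, rest = data.split(':', 1)
            n = int(length)            # counts the channel digit plus the text
            channel = int(rest[0])     # single decimal digit; see the assert in _merge
            text = rest[1:n]
            assert rest[n] == ','      # every record is comma-terminated
            chunks.append((channel, text))
            data = rest[n + 1:]
        return chunks

    assert parse_chunks('13:0hello, world,') == [(0, 'hello, world')]
    assert parse_chunks('2:1x,2:2y,2:1z,') == [(1, 'x'), (2, 'y'), (1, 'z')]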