diff --git a/master/MANIFEST.in b/master/MANIFEST.in
index fc855f38f2b..71484479002 100644
--- a/master/MANIFEST.in
+++ b/master/MANIFEST.in
@@ -8,13 +8,14 @@ include docs/Makefile
 include docs/version.py
 include docs/buildbot.1
 
-include buildbot/db/schema/tables.sql
 include buildbot/scripts/sample.cfg
 include buildbot/status/web/files/*
 include buildbot/status/web/templates/*.html buildbot/status/web/templates/*.xml
 include buildbot/clients/debug.glade
 include buildbot/buildbot.png
+include buildbot/db/migrate/README
+
 include contrib/* contrib/windows/* contrib/os-x/* contrib/css/*
 include contrib/trac/* contrib/trac/bbwatcher/* contrib/trac/bbwatcher/templates/*
 include contrib/init-scripts/*
diff --git a/master/NEWS b/master/NEWS
index 1abc74848b3..25ec7103367 100644
--- a/master/NEWS
+++ b/master/NEWS
@@ -8,6 +8,15 @@ Major User visible changes in Buildbot.  -*- outline -*-
 
 * Buildbot 0.8.3 (December 19, 2010)
 
+** SQLAlchemy & SQLAlchemy-Migrate
+
+Buildbot now uses SQLAlchemy as a database abstraction layer. This will give
+us greater inter-database compatibility and a more stable and reliable basis
+for this core component of the framework. SQLAlchemy-Migrate is used to manage
+changes to the database schema from version to version.
+
+* Next Release
+
 ** PBChangeSource now supports authentication
 
 PBChangeSource now supports the `user` and `passwd` arguments. Users with a
diff --git a/master/buildbot/changes/changes.py b/master/buildbot/changes/changes.py
index 4539a7b4e7a..c847925d8ba 100644
--- a/master/buildbot/changes/changes.py
+++ b/master/buildbot/changes/changes.py
@@ -219,7 +219,7 @@ def saveYourself(self):
     # bytestrings in an old changes.pck into unicode strings
     def recode_changes(self, old_encoding, quiet=False):
         """Processes the list of changes, with the change attributes re-encoded
-        as UTF-8 bytestrings"""
+        as unicode objects"""
         nconvert = 0
         for c in self.changes:
             # give revision special handling, in case it is an integer
@@ -235,6 +235,22 @@ def recode_changes(self, old_encoding, quiet=False):
                     except UnicodeDecodeError:
                         raise UnicodeError("Error decoding %s of change #%s as %s:\n%r" %
                             (attr, c.number, old_encoding, a))
+
+            # filenames are a special case, but in general they'll have the same encoding
+            # as everything else on a system. If not, well, hack this script to do your
+            # import!
+            newfiles = []
+            for filename in util.flatten(c.files):
+                if isinstance(filename, str):
+                    try:
+                        filename = filename.decode(old_encoding)
+                        nconvert += 1
+                    except UnicodeDecodeError:
+                        raise UnicodeError("Error decoding filename '%s' of change #%s as %s:\n%r" %
+                            (filename.decode('ascii', 'replace'),
+                             c.number, old_encoding, filename))
+                newfiles.append(filename)
+            c.files = newfiles
 
         if not quiet:
             print "converted %d strings" % nconvert
diff --git a/master/buildbot/db/schema/base.py b/master/buildbot/db/base.py
similarity index 57%
rename from master/buildbot/db/schema/base.py
rename to master/buildbot/db/base.py
index 4274501d9da..0eb7feebe39 100644
--- a/master/buildbot/db/schema/base.py
+++ b/master/buildbot/db/base.py
@@ -13,15 +13,20 @@
 #
 # Copyright Buildbot Team Members
 
-class Upgrader(object):
+"""
+Base classes for database handling
+"""
 
-    def __init__(self, dbapi, conn, basedir, quiet=False):
-        self.dbapi = dbapi
-        self.conn = conn
-        self.basedir = basedir
-        self.quiet = quiet
+class DBConnectorComponent(object):
+    """
+    A fixed component of the DBConnector, handling one particular aspect of the
+    database. 
Instances of subclasses are assigned to attributes of the + DBConnector object, so that they are available at e.g., C{master.db.model} + or C{master.db.changes}. This parent class takes care of the necessary + backlinks and other housekeeping. + """ - self.dbapiName = dbapi.__name__ + connector = None - def upgrade(self): - raise NotImplementedError + def __init__(self, connector): + self.connector = connector diff --git a/master/buildbot/db/connector.py b/master/buildbot/db/connector.py index 4919f0a1bd5..8f1f32ef87a 100644 --- a/master/buildbot/db/connector.py +++ b/master/buildbot/db/connector.py @@ -13,11 +13,12 @@ # # Copyright Buildbot Team Members -import sys, collections, base64 +import collections, base64 from twisted.python import log, threadable from twisted.internet import defer -from twisted.enterprise import adbapi +from buildbot.db import enginestrategy + from buildbot import util from buildbot.util import collections as bbcollections from buildbot.changes.changes import Change @@ -27,20 +28,7 @@ from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE from buildbot.util.eventual import eventually from buildbot.util import json - -# Don't auto-resubmit queries that encounter a broken connection: let them -# fail. Use the "notification doorbell" thing to provide the retry. Set -# cp_reconnect=True, so that a connection failure will prepare the -# ConnectionPool to reconnect next time. - -class MyTransaction(adbapi.Transaction): - def execute(self, *args, **kwargs): - #print "Q", args, kwargs - return self._cursor.execute(*args, **kwargs) - def fetchall(self): - rc = self._cursor.fetchall() - #print " F", rc - return rc +from buildbot.db import pool, model def _one_or_else(res, default=None, process_f=lambda x: x): if not res: @@ -55,33 +43,43 @@ def str_or_none(s): class Token: # used for _start_operation/_end_operation pass -class DBConnector(util.ComparableMixin): - # this will refuse to create the database: use 'create-master' for that - compare_attrs = ["args", "kwargs"] +from twisted.enterprise import adbapi +class TempAdbapiPool(adbapi.ConnectionPool): + def __init__(self, engine): + # this wants a module name, so give it one.. + adbapi.ConnectionPool.__init__(self, "buildbot.db.connector") + self._engine = engine + + def connect(self): + return self._engine.raw_connection() + + def stop(self): + pass + +class DBConnector(object): + """ + The connection between Buildbot and its backend database. This is + generally accessible as master.db, but is also used during upgrades. + + Most of the interesting operations available via the connector are + implemented in connector components, available as attributes of this + object, and listed below. + """ + synchronized = ["notify", "_end_operation"] MAX_QUERY_TIMES = 1000 - def __init__(self, spec): - # typical args = (dbmodule, dbname, username, password) - self._query_times = collections.deque() - self._spec = spec + def __init__(self, db_url, basedir): + self.basedir = basedir + "basedir for this master - used for upgrades" - # this is for synchronous calls: runQueryNow, runInteractionNow - self._dbapi = spec.get_dbapi() - self._nonpool = None - self._nonpool_lastused = None - self._nonpool_max_idle = spec.get_maxidle() + self._engine = enginestrategy.create_engine(db_url, basedir=self.basedir) + self.pool = pool.DBThreadPool(self._engine) + "thread pool (L{buildbot.db.pool.DBThreadPool}) for this db" - # pass queries in with "?" placeholders. If the backend uses a - # different style, we'll replace them. 
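The adbapi machinery removed here is replaced by a single SQLAlchemy engine
plus a thread pool. A minimal sketch of the new pattern, assuming a sqlite
db_url, and assuming that DBThreadPool.do hands the callable a worker-thread
connection in the same way that do_with_engine (added in pool.py below) hands
it the engine:

    from buildbot.db import enginestrategy, pool

    engine = enginestrategy.create_engine("sqlite:///state.sqlite",
                                          basedir="/path/to/master")
    db_pool = pool.DBThreadPool(engine)

    def thd_count_changes(conn):
        # runs in a worker thread, never in the reactor thread
        return conn.execute("SELECT COUNT(*) FROM changes").scalar()

    d = db_pool.do(thd_count_changes)  # Deferred firing with the count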
- self.paramstyle = self._dbapi.paramstyle + self._oldpool = TempAdbapiPool(self._engine) - self._pool = spec.get_async_connection_pool() - self._pool.transactionFactory = MyTransaction - # the pool must be started before it can be used. The real - # buildmaster process will do this at reactor start. CLI tools (like - # "buildbot upgrade-master") must do it manually. Unit tests are run - # in an environment in which it is already started. + self._query_times = collections.deque() self._change_cache = util.LRUCache() self._sourcestamp_cache = util.LRUCache() @@ -93,32 +91,33 @@ def __init__(self, spec): self._started = False + # set up components + self.model = model.Model(self) + "L{buildbot.db.model.Model} instance" + + + + def _getCurrentTime(self): # this is a seam for use in testing return util.now() - def start(self): + def start(self): # TODO: remove # this only *needs* to be called in reactorless environments (which # should be eliminated anyway). but it doesn't hurt anyway - self._pool.start() + self._oldpool.start() self._started = True - def stop(self): + def stop(self): # TODO: remove """Call this when you're done with me""" - # Close our synchronous connection if we've got one - if self._nonpool: - self._nonpool.close() - self._nonpool = None - self._nonpool_lastused = None - if not self._started: return - self._pool.close() + self._oldpool.stop() self._started = False - del self._pool + del self._oldpool - def quoteq(self, query): + def quoteq(self, query): # TODO: remove """ Given a query that contains qmark-style placeholders, like:: INSERT INTO foo (col1, col2) VALUES (?,?) @@ -126,12 +125,13 @@ def quoteq(self, query): placeholders, like:: INSERT INTO foo (col1, col2) VALUES (%s,%s) """ - if self.paramstyle == "format": - return query.replace("?","%s") - assert self.paramstyle == "qmark" + # TODO: assumes sqlite +# if self.paramstyle == "format": +# return query.replace("?","%s") + #assert self.paramstyle == "qmark" return query - def parmlist(self, count): + def parmlist(self, count): # TODO: remove """ When passing long lists of values to e.g., an INSERT query, it is tedious to pass long strings of ? placeholders. 
This function will @@ -141,18 +141,7 @@ def parmlist(self, count): p = self.quoteq("?") return "(" + ",".join([p]*count) + ")" - def get_version(self): - """Returns None for an empty database, or a number (probably 1) for - the database's version""" - try: - res = self.runQueryNow("SELECT version FROM version") - except (self._dbapi.OperationalError, self._dbapi.ProgrammingError): - # this means the version table is missing: the db is empty - return None - assert len(res) == 1 - return res[0][0] - - def runQueryNow(self, *args, **kwargs): + def runQueryNow(self, *args, **kwargs): # TODO: remove # synchronous+blocking version of runQuery() assert self._started return self.runInteractionNow(self._runQuery, *args, **kwargs) @@ -161,6 +150,7 @@ def _runQuery(self, c, *args, **kwargs): c.execute(*args, **kwargs) return c.fetchall() + # TODO: remove def _start_operation(self): t = Token() self._active_operations.add(t) @@ -179,7 +169,7 @@ def _end_operation(self, t): eventually(self.send_notification, category, args) self._pending_notifications = [] - def runInteractionNow(self, interaction, *args, **kwargs): + def runInteractionNow(self, interaction, *args, **kwargs): # TODO: remove # synchronous+blocking version of runInteraction() assert self._started start = self._getCurrentTime() @@ -190,93 +180,65 @@ def runInteractionNow(self, interaction, *args, **kwargs): self._end_operation(t) self._add_query_time(start) - def get_sync_connection(self): - # This is a wrapper around spec.get_sync_connection that maintains a - # single connection to the database for synchronous usage. It will get - # a new connection if the existing one has been idle for more than - # max_idle seconds. - if self._nonpool_max_idle is not None: - now = util.now() - if self._nonpool_lastused and self._nonpool_lastused + self._nonpool_max_idle < now: - self._nonpool = None - - if not self._nonpool: - self._nonpool = self._spec.get_sync_connection() + def get_sync_connection(self): # TODO: remove + # TODO: SYNC CONNECTIONS MUST DIE + return self._engine.raw_connection() - self._nonpool_lastused = util.now() - return self._nonpool - - def _runInteractionNow(self, interaction, *args, **kwargs): + def _runInteractionNow(self, interaction, *args, **kwargs): # TODO: remove conn = self.get_sync_connection() c = conn.cursor() - try: - result = interaction(c, *args, **kwargs) - c.close() - conn.commit() - return result - except: - excType, excValue, excTraceback = sys.exc_info() - try: - conn.rollback() - c2 = conn.cursor() - c2.execute(self._pool.good_sql) - c2.close() - conn.commit() - except: - log.msg("rollback failed, will reconnect next query") - log.err() - # and the connection is probably dead: clear the reference, - # so we'll establish a new connection next time - self._nonpool = None - raise excType, excValue, excTraceback - - def notify(self, category, *args): + result = interaction(c, *args, **kwargs) + c.close() + conn.commit() + return result + + def notify(self, category, *args): # TODO: remove # this is wrapped by synchronized= and threadable.synchronous(), # since it will be invoked from runInteraction threads self._pending_notifications.append( (category,args) ) - def send_notification(self, category, args): + def send_notification(self, category, args): # TODO: remove # in the distributed system, this will be invoked by lineReceived() #print "SEND", category, args for observer in self._subscribers[category]: eventually(observer, category, *args) - def subscribe_to(self, category, observer): + def 
subscribe_to(self, category, observer): # TODO: remove self._subscribers[category].add(observer) - def runQuery(self, *args, **kwargs): + def runQuery(self, *args, **kwargs): # TODO: remove assert self._started self._pending_operation_count += 1 - d = self._pool.runQuery(*args, **kwargs) + d = self._oldpool.runQuery(*args, **kwargs) return d - def _runQuery_done(self, res, start, t): + def _runQuery_done(self, res, start, t): # TODO: remove self._end_operation(t) self._add_query_time(start) self._pending_operation_count -= 1 return res - def _add_query_time(self, start): + def _add_query_time(self, start): # TODO: remove elapsed = self._getCurrentTime() - start self._query_times.append(elapsed) if len(self._query_times) > self.MAX_QUERY_TIMES: self._query_times.popleft() - def runInteraction(self, *args, **kwargs): + def runInteraction(self, *args, **kwargs): # TODO: remove assert self._started self._pending_operation_count += 1 start = self._getCurrentTime() t = self._start_operation() - d = self._pool.runInteraction(*args, **kwargs) + d = self._oldpool.runInteraction(*args, **kwargs) d.addBoth(self._runInteraction_done, start, t) return d - def _runInteraction_done(self, res, start, t): + def _runInteraction_done(self, res, start, t): # TODO: remove self._end_operation(t) self._add_query_time(start) self._pending_operation_count -= 1 return res - # ChangeManager methods + # old ChangeManager methods def addChangeToDatabase(self, change): self.runInteractionNow(self._txn_addChangeToDatabase, change) @@ -405,6 +367,8 @@ def _txn_getChangeNumberedNow(self, t, changeid): c.number = changeid return c + # note that this is the async version of getChangeNumberedNow .. consistent + # naming is the hobgoblin of hackable software! def getChangeByNumber(self, changeid): # return a Deferred that fires with a Change instance, or None if # there is no Change with that number diff --git a/master/buildbot/db/dbspec.py b/master/buildbot/db/dbspec.py deleted file mode 100644 index 76119c2547e..00000000000 --- a/master/buildbot/db/dbspec.py +++ /dev/null @@ -1,263 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Copyright Buildbot Team Members - -import sys, os, cgi, re, time - -from twisted.python import log, reflect -from twisted.enterprise import adbapi - -from buildbot import util - -class ExpiringConnectionPool(adbapi.ConnectionPool): - """ - A Connection pool that expires connections after a certain amount of idle - time. - """ - def __init__(self, dbapiName, max_idle=60, *args, **kwargs): - """ - @param max_idle: reconnect connections that have been idle more than - this number of seconds. 
-        """
-
-        log.msg("Using expiring pool with max_idle=%i" % max_idle)
-
-        adbapi.ConnectionPool.__init__(self, dbapiName, *args, **kwargs)
-        self.max_idle = max_idle
-
-        self.connection_lastused = {}
-
-    def connect(self):
-        tid = self.threadID()
-        now = util.now()
-        lastused = self.connection_lastused.get(tid)
-        if lastused and lastused + self.max_idle < now:
-            conn = self.connections.get(tid)
-            if self.noisy:
-                log.msg("expiring old connection")
-            self.disconnect(conn)
-
-        conn = adbapi.ConnectionPool.connect(self)
-        self.connection_lastused[tid] = now
-        return conn
-
-    def disconnect(self, conn):
-        adbapi.ConnectionPool.disconnect(self, conn)
-        tid = self.threadID()
-        del self.connection_lastused[tid]
-
-class TimeoutError(Exception):
-    def __init__(self, msg):
-        Exception.__init__(self, msg)
-
-class RetryingCursor:
-    max_retry_time = 1800 # Half an hour
-    max_sleep_time = 1
-
-    def __init__(self, dbapi, cursor):
-        self.dbapi = dbapi
-        self.cursor = cursor
-
-    def sleep(self, s):
-        time.sleep(s)
-
-    def execute(self, *args, **kw):
-        start_time = util.now()
-        sleep_time = 0.1
-        while True:
-            try:
-                query_start_time = util.now()
-                result = self.cursor.execute(*args, **kw)
-                end_time = util.now()
-                if end_time - query_start_time > 2:
-                    log.msg("Long query (%is): %s" % ((end_time - query_start_time), str((args, kw))))
-                return result
-            except self.dbapi.OperationalError, e:
-                if e.args[0] == 'database is locked':
-                    # Retry
-                    log.msg("Retrying query %s" % str((args, kw)))
-                    now = util.now()
-                    if start_time + self.max_retry_time < now:
-                        raise TimeoutError("Exceeded timeout trying to do %s" % str((args, kw)))
-                    self.sleep(sleep_time)
-                    sleep_time = max(self.max_sleep_time, sleep_time * 2)
-                    continue
-                raise
-
-    def __getattr__(self, name):
-        return getattr(self.cursor, name)
-
-class RetryingConnection:
-    def __init__(self, dbapi, conn):
-        self.dbapi = dbapi
-        self.conn = conn
-
-    def cursor(self):
-        return RetryingCursor(self.dbapi, self.conn.cursor())
-
-    def __getattr__(self, name):
-        return getattr(self.conn, name)
-
-class RetryingConnectionPool(adbapi.ConnectionPool):
-    def connect(self):
-        return RetryingConnection(self.dbapi, adbapi.ConnectionPool.connect(self))
-
-class DBSpec(object):
-    """
-    A specification for the database type and other connection parameters.
-    """
-
-    # List of connkw arguments that are applicable to the connection pool only
-    pool_args = ["max_idle"]
-    def __init__(self, dbapiName, *connargs, **connkw):
-        # special-case 'sqlite3', replacing it with the available implementation
-        if dbapiName == 'sqlite3':
-            dbapiName = self._get_sqlite_dbapi_name()
-
-        self.dbapiName = dbapiName
-        self.connargs = connargs
-        self.connkw = connkw
-
-    @classmethod
-    def from_url(cls, url, basedir=None):
-        """
-        Parses a URL of the format
-         driver://[username:password@]host:port/database[?args]
-        and returns a DB object representing this URL. Percent-
-        substitution will be performed, replacing %(basedir)s with
-        the basedir argument.
-
-        raises ValueError on an invalid URL.
-        """
-        match = re.match(r"""
-        ^(?P<driver>\w+)://
-        (
-            ((?P<user>\w+)(:(?P<passwd>\S+))?@)?
-            ((?P<host>[-A-Za-z0-9.]+)(:(?P<port>\d+))?)?/
-            (?P<database>\S+?)(\?(?P<args>.*))?
- )?$""", url, re.X) - if not match: - raise ValueError("Malformed url") - - d = match.groupdict() - driver = d['driver'] - user = d['user'] - passwd = d['passwd'] - host = d['host'] - port = d['port'] - if port is not None: - port = int(port) - database = d['database'] - args = {} - if d['args']: - for key, value in cgi.parse_qsl(d['args']): - args[key] = value - - if driver == "sqlite": - # user, passwd, host, and port must all be None - if not user == passwd == host == port == None: - raise ValueError("user, passwd, host, port must all be None") - if not database: - database = ":memory:" - elif basedir: - database = database % dict(basedir=basedir) - database = os.path.join(basedir, database) - return cls("sqlite3", database, **args) - elif driver == "mysql": - args['host'] = host - args['db'] = database - if user: - args['user'] = user - if passwd: - args['passwd'] = passwd - if port: - args['port'] = port - if 'max_idle' in args: - args['max_idle'] = int(args['max_idle']) - - return cls("MySQLdb", use_unicode=True, charset="utf8", **args) - else: - raise ValueError("Unsupported dbapi %s" % driver) - - def _get_sqlite_dbapi_name(self): - # see which dbapi we can use and return that name; prefer - # pysqlite2.dbapi2 if it is available. - sqlite_dbapi_name = None - try: - from pysqlite2 import dbapi2 as sqlite3 - assert sqlite3 - sqlite_dbapi_name = "pysqlite2.dbapi2" - except ImportError: - # don't use built-in sqlite3 on 2.5 -- it has *bad* bugs - if sys.version_info >= (2,6): - import sqlite3 - assert sqlite3 - sqlite_dbapi_name = "sqlite3" - else: - raise - return sqlite_dbapi_name - - def get_dbapi(self): - """ - Get the dbapi module used for this connection (for things like - exceptions and module-global attributes - """ - return reflect.namedModule(self.dbapiName) - - def get_sync_connection(self): - """ - Get a synchronous connection to the specified database. This returns - a simple DBAPI connection object. - """ - dbapi = self.get_dbapi() - connkw = self.connkw.copy() - for arg in self.pool_args: - if arg in connkw: - del connkw[arg] - conn = dbapi.connect(*self.connargs, **connkw) - if 'sqlite' in self.dbapiName: - conn = RetryingConnection(dbapi, conn) - return conn - - def get_async_connection_pool(self): - """ - Get an asynchronous (adbapi) connection pool for the specified - database. - """ - - # add some connection keywords - connkw = self.connkw.copy() - connkw["cp_reconnect"] = True - connkw["cp_noisy"] = True - - # This disables sqlite's obsessive checks that a given connection is - # only used in one thread; this is justified by the Twisted ticket - # regarding the errors you get on connection shutdown if you do *not* - # add this parameter: http://twistedmatrix.com/trac/ticket/3629 - if 'sqlite' in self.dbapiName: - connkw['check_same_thread'] = False - log.msg("creating adbapi pool: %s %s %s" % \ - (self.dbapiName, self.connargs, connkw)) - - # MySQL needs support for expiring idle connections - if self.dbapiName == 'MySQLdb': - return ExpiringConnectionPool(self.dbapiName, *self.connargs, **connkw) - else: - return RetryingConnectionPool(self.dbapiName, *self.connargs, **connkw) - - def get_maxidle(self): - default = None - if self.dbapiName == "MySQLdb": - default = 60 - return self.connkw.get("max_idle", default) diff --git a/master/buildbot/db/migrate/README b/master/buildbot/db/migrate/README new file mode 100644 index 00000000000..6218f8cac42 --- /dev/null +++ b/master/buildbot/db/migrate/README @@ -0,0 +1,4 @@ +This is a database migration repository. 
+ +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/master/buildbot/db/migrate/migrate.cfg b/master/buildbot/db/migrate/migrate.cfg new file mode 100644 index 00000000000..d63b6835711 --- /dev/null +++ b/master/buildbot/db/migrate/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=Buildbot + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/master/buildbot/db/migrate/versions/001_initial.py b/master/buildbot/db/migrate/versions/001_initial.py new file mode 100644 index 00000000000..6cc80ab4cf2 --- /dev/null +++ b/master/buildbot/db/migrate/versions/001_initial.py @@ -0,0 +1,279 @@ +# This file is part of Buildbot. Buildbot is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Copyright Buildbot Team Members + +import os +import cPickle +from twisted.persisted import styles +from buildbot.util import json +import sqlalchemy as sa + +metadata = sa.MetaData() + +last_access = sa.Table('last_access', metadata, + sa.Column('who', sa.String(256), nullable=False), + sa.Column('writing', sa.Integer, nullable=False), + sa.Column('last_access', sa.Integer, nullable=False), +) + +changes_nextid = sa.Table('changes_nextid', metadata, + sa.Column('next_changeid', sa.Integer), +) + +changes = sa.Table('changes', metadata, + sa.Column('changeid', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('author', sa.String(1024), nullable=False), + sa.Column('comments', sa.String(1024), nullable=False), + sa.Column('is_dir', sa.SmallInteger, nullable=False), + sa.Column('branch', sa.String(1024)), + sa.Column('revision', sa.String(256)), + sa.Column('revlink', sa.String(256)), + sa.Column('when_timestamp', sa.Integer, nullable=False), + sa.Column('category', sa.String(256)), +) + +change_links = sa.Table('change_links', metadata, + sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False), + sa.Column('link', sa.String(1024), nullable=False), +) + +change_files = sa.Table('change_files', metadata, + sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False), + sa.Column('filename', sa.String(1024), nullable=False), +) + +change_properties = sa.Table('change_properties', metadata, + sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False), + sa.Column('property_name', sa.String(256), nullable=False), + sa.Column('property_value', sa.String(1024), nullable=False), +) + +schedulers = sa.Table("schedulers", metadata, + sa.Column('schedulerid', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('name', sa.String(128), nullable=False), + sa.Column('state', sa.String(1024), nullable=False), +) + +scheduler_changes = sa.Table('scheduler_changes', metadata, + sa.Column('schedulerid', sa.Integer, sa.ForeignKey('schedulers.schedulerid')), + sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid')), + sa.Column('important', sa.SmallInteger), +) + +scheduler_upstream_buildsets = sa.Table('scheduler_upstream_buildsets', metadata, + sa.Column('buildsetid', sa.Integer, sa.ForeignKey('buildsets.id')), + sa.Column('schedulerid', sa.Integer, sa.ForeignKey('schedulers.schedulerid')), + sa.Column('active', sa.SmallInteger), +) + +sourcestamps = sa.Table('sourcestamps', metadata, + sa.Column('id', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('branch', sa.String(256), server_default=sa.DefaultClause("NULL")), + sa.Column('revision', sa.String(256), server_default=sa.DefaultClause("NULL")), + sa.Column('patchid', sa.Integer, sa.ForeignKey('patches.id'), server_default=sa.DefaultClause("NULL")), +) + +patches = sa.Table('patches', metadata, + sa.Column('id', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('patchlevel', sa.Integer, nullable=False), + sa.Column('patch_base64', sa.Text, nullable=False), + sa.Column('subdir', sa.Text), +) + +sourcestamp_changes = sa.Table('sourcestamp_changes', metadata, + sa.Column('sourcestampid', sa.Integer, sa.ForeignKey('sourcestamps.id'), nullable=False), + sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False), +) + +buildsets = sa.Table('buildsets', metadata, + sa.Column('id', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('external_idstring', sa.String(256)), + 
sa.Column('reason', sa.String(256)), + sa.Column('sourcestampid', sa.Integer, sa.ForeignKey('sourcestamps.id'), nullable=False), + sa.Column('submitted_at', sa.Integer, nullable=False), + sa.Column('complete', sa.SmallInteger, nullable=False, server_default=sa.DefaultClause("0")), + sa.Column('complete_at', sa.Integer), + sa.Column('results', sa.SmallInteger), +) + +buildset_properties = sa.Table('buildset_properties', metadata, + sa.Column('buildsetid', sa.Integer, sa.ForeignKey('buildsets.id'), nullable=False), + sa.Column('property_name', sa.String(256), nullable=False), + sa.Column('property_value', sa.String(1024), nullable=False), +) + +buildrequests = sa.Table('buildrequests', metadata, + sa.Column('id', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('buildsetid', sa.Integer, sa.ForeignKey("buildsets.id"), nullable=False), + sa.Column('buildername', sa.String(length=None), nullable=False), + sa.Column('priority', sa.Integer, nullable=False, server_default=sa.DefaultClause("0")), + sa.Column('claimed_at', sa.Integer, server_default=sa.DefaultClause("0")), + sa.Column('claimed_by_name', sa.String(length=None), server_default=sa.DefaultClause("NULL")), + sa.Column('claimed_by_incarnation', sa.String(length=None), server_default=sa.DefaultClause("NULL")), + sa.Column('complete', sa.Integer, server_default=sa.DefaultClause("0")), + sa.Column('results', sa.SmallInteger), + sa.Column('submitted_at', sa.Integer, nullable=False), + sa.Column('complete_at', sa.Integer), +) + +builds = sa.Table('builds', metadata, + sa.Column('id', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('number', sa.Integer, nullable=False), + sa.Column('brid', sa.Integer, sa.ForeignKey('buildrequests.id'), nullable=False), + sa.Column('start_time', sa.Integer, nullable=False), + sa.Column('finish_time', sa.Integer), +) + +def test_unicode(migrate_engine): + """Test that the database can handle inserting and selecting Unicode""" + # set up a subsidiary MetaData object to hold this temporary table + submeta = sa.MetaData() + submeta.bind = migrate_engine + + test_unicode = sa.Table('test_unicode', submeta, + sa.Column('u', sa.Unicode), + sa.Column('b', sa.BLOB), + ) + test_unicode.create() + + # insert a unicode value in there + u = u"Frosty the \N{SNOWMAN}" + b='\xff\xff\x00' + ins = test_unicode.insert().values(u=u, b=b) + migrate_engine.execute(ins) + + # see if the data is intact + row = migrate_engine.execute(sa.select([test_unicode])).fetchall()[0] + assert type(row['u']) is unicode + assert row['u'] == u + assert type(row['b']) is str + assert row['b'] == b + + # drop the test table + test_unicode.drop() + +def import_changes(migrate_engine): + # get the basedir from the engine - see model.py if you're wondering + # how it got there + basedir = migrate_engine.buildbot_basedir + + # strip None from any of these values, just in case + def remove_none(x): + if x is None: return u"" + elif isinstance(x, str): + return x.decode("utf8") + else: + return x + + # if we still have a changes.pck, then we need to migrate it + changes_pickle = os.path.join(basedir, "changes.pck") + if not os.path.exists(changes_pickle): + migrate_engine.execute(changes_nextid.insert(), + next_changeid=1) + return + + #if not quiet: print "migrating changes.pck to database" + + # 'source' will be an old b.c.changes.ChangeMaster instance, with a + # .changes attribute + source = cPickle.load(open(changes_pickle,"rb")) + styles.doUpgrade() + + #if not quiet: print " (%d Change objects)" % len(source.changes) + 
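+    # each entry in source.changes is an old buildbot Change instance, as
+    # restored by cPickle and styles.doUpgrade above; the code below copies
+    # its attributes (number, who, comments, isdir, branch, revision, revlink,
+    # when, category, links, files, and properties) into the new tables
+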
+ # first, scan for changes without a number. If we find any, then we'll + # renumber the changes sequentially + have_unnumbered = False + for c in source.changes: + if c.revision and c.number is None: + have_unnumbered = True + break + if have_unnumbered: + n = 1 + for c in source.changes: + if c.revision: + c.number = n + n = n + 1 + + # insert the changes + for c in source.changes: + if not c.revision: + continue + try: + values = dict( + changeid=c.number, + author=c.who, + comments=c.comments, + is_dir=c.isdir, + branch=c.branch, + revision=c.revision, + revlink=c.revlink, + when_timestamp=c.when, + category=c.category) + values = dict([ (k, remove_none(v)) for k, v in values.iteritems() ]) + except UnicodeDecodeError, e: + raise UnicodeError("Trying to import change data as UTF-8 failed. Please look at contrib/fix_changes_pickle_encoding.py: %s" % str(e)) + + migrate_engine.execute(changes.insert(), **values) + + for link in c.links: + migrate_engine.execute(change_links.insert(), + changeid=c.number, link=link) + + # sometimes c.files contains nested lists -- why, I do not know! But we deal with + # it all the same - see bug #915. We'll assume for now that c.files contains *either* + # lists of filenames or plain filenames, not both. + def flatten(l): + if l and type(l[0]) == list: + rv = [] + for e in l: + if type(e) == list: + rv.extend(e) + else: + rv.append(e) + return rv + else: + return l + for filename in flatten(c.files): + migrate_engine.execute(change_files.insert(), + changeid=c.number, + filename=filename) + + for propname,propvalue in c.properties.properties.items(): + encoded_value = json.dumps(propvalue) + migrate_engine.execute(change_properties.insert(), + changeid=c.number, + property_name=propname, + property_value=encoded_value) + + # update next_changeid + max_changeid = max([ c.number for c in source.changes if c.revision ] + [ 0 ]) + migrate_engine.execute(changes_nextid.insert(), + next_changeid=max_changeid+1) + + #if not quiet: + # print "moving changes.pck to changes.pck.old; delete it or keep it as a backup" + os.rename(changes_pickle, changes_pickle+".old") + +def upgrade(migrate_engine): + metadata.bind = migrate_engine + + # do some tests before getting started + test_unicode(migrate_engine) + + # create the initial schema + metadata.create_all() + + # and import some changes + import_changes(migrate_engine) diff --git a/master/buildbot/db/migrate/versions/002_add_proj_repo.py b/master/buildbot/db/migrate/versions/002_add_proj_repo.py new file mode 100644 index 00000000000..b2a300ebb86 --- /dev/null +++ b/master/buildbot/db/migrate/versions/002_add_proj_repo.py @@ -0,0 +1,30 @@ +# This file is part of Buildbot. Buildbot is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+#
+# Copyright Buildbot Team Members
+
+import sqlalchemy as sa
+
+def upgrade(migrate_engine):
+    metadata = sa.MetaData()
+    metadata.bind = migrate_engine
+
+    # add project and repository columns to 'changes' and 'sourcestamps'
+    def add_cols(table):
+        repository = sa.Column('repository', sa.Text, nullable=False, server_default=sa.DefaultClause(''))
+        repository.create(table, populate_default=True)
+        project = sa.Column('project', sa.Text, nullable=False, server_default=sa.DefaultClause(''))
+        project.create(table, populate_default=True)
+
+    add_cols(sa.Table('changes', metadata, autoload=True))
+    add_cols(sa.Table('sourcestamps', metadata, autoload=True))
diff --git a/master/buildbot/db/migrate/versions/003_scheduler_class_name.py b/master/buildbot/db/migrate/versions/003_scheduler_class_name.py
new file mode 100644
index 00000000000..a11bf062eda
--- /dev/null
+++ b/master/buildbot/db/migrate/versions/003_scheduler_class_name.py
@@ -0,0 +1,29 @@
+# This file is part of Buildbot. Buildbot is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Buildbot Team Members
+
+import sqlalchemy as sa
+
+def upgrade(migrate_engine):
+    metadata = sa.MetaData()
+    metadata.bind = migrate_engine
+
+    # add an empty class_name to the schedulers table
+    schedulers = sa.Table('schedulers', metadata, autoload=True)
+    class_name = sa.Column('class_name', sa.Text, nullable=False, server_default=sa.DefaultClause(''))
+    class_name.create(schedulers, populate_default=True)
+
+    # and an index since we'll be selecting with (name= AND class=)
+    idx = sa.Index('name_and_class', schedulers.c.name, schedulers.c.class_name)
+    idx.create(migrate_engine)
diff --git a/master/buildbot/db/migrate/versions/004_add_autoincrement.py b/master/buildbot/db/migrate/versions/004_add_autoincrement.py
new file mode 100644
index 00000000000..53a6d93a74e
--- /dev/null
+++ b/master/buildbot/db/migrate/versions/004_add_autoincrement.py
@@ -0,0 +1,73 @@
+# This file is part of Buildbot. Buildbot is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# +# Copyright Buildbot Team Members + +import sqlalchemy as sa + +def upgrade(migrate_engine): + metadata = sa.MetaData() + metadata.bind = migrate_engine + + # re-include some of the relevant tables, as they were in version 3, since + # sqlalchemy's reflection doesn't work very well for defaults + + sa.Table("schedulers", metadata, + sa.Column('schedulerid', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('name', sa.String(128), nullable=False), + sa.Column('state', sa.String(1024), nullable=False), + sa.Column('class_name', sa.Text, nullable=False, server_default=sa.DefaultClause('')) + ) + + sa.Table('changes', metadata, + sa.Column('changeid', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('author', sa.String(1024), nullable=False), + sa.Column('comments', sa.String(1024), nullable=False), + sa.Column('is_dir', sa.SmallInteger, nullable=False), + sa.Column('branch', sa.String(1024)), + sa.Column('revision', sa.String(256)), + sa.Column('revlink', sa.String(256)), + sa.Column('when_timestamp', sa.Integer, nullable=False), + sa.Column('category', sa.String(256)), + sa.Column('repository', sa.Text, nullable=False, server_default=sa.DefaultClause('')), + sa.Column('project', sa.Text, nullable=False, server_default=sa.DefaultClause('')), + ) + + sa.Table('sourcestamps', metadata, + sa.Column('id', sa.Integer, autoincrement=False, primary_key=True), + sa.Column('branch', sa.String(256), server_default=sa.DefaultClause("NULL")), + sa.Column('revision', sa.String(256), server_default=sa.DefaultClause("NULL")), + sa.Column('patchid', sa.Integer, sa.ForeignKey('patches.id'), server_default=sa.DefaultClause("NULL")), + sa.Column('repository', sa.Text, nullable=False, server_default=''), + sa.Column('project', sa.Text, nullable=False, server_default=''), + ) + + to_autoinc = [ s.split(".") for s in + "schedulers.schedulerid", + "builds.id", + "changes.changeid", + "buildrequests.id", + "buildsets.id", + "patches.id", + "sourcestamps.id", + ] + + for table_name, col_name in to_autoinc: + table = sa.Table(table_name, metadata, autoload=True) + col = table.c[col_name] + col.alter(autoincrement=True) + + # also drop the changes_nextid table here (which really should have been a + # sequence..) + table = sa.Table('changes_nextid', metadata, autoload=True) + table.drop() diff --git a/master/buildbot/db/migrate/versions/005_add_indexes.py b/master/buildbot/db/migrate/versions/005_add_indexes.py new file mode 100644 index 00000000000..2a98085a638 --- /dev/null +++ b/master/buildbot/db/migrate/versions/005_add_indexes.py @@ -0,0 +1,62 @@ +# This file is part of Buildbot. Buildbot is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Copyright Buildbot Team Members + +import sqlalchemy as sa + +def upgrade(migrate_engine): + metadata = sa.MetaData() + metadata.bind = migrate_engine + metadata.reflect() + + def add_index(table_name, col_name): + idx_name = "%s_%s" % (table_name, col_name) + idx = sa.Index(idx_name, metadata.tables[table_name].c[col_name]) + idx.create(migrate_engine) + add_index("buildrequests", "buildsetid") + add_index("buildrequests", "buildername") + add_index("buildrequests", "complete") + add_index("buildrequests", "claimed_at") + add_index("buildrequests", "claimed_by_name") + + add_index("builds", "number") + add_index("builds", "brid") + + add_index("buildsets", "complete") + add_index("buildsets", "submitted_at") + + add_index("buildset_properties", "buildsetid") + + add_index("changes", "branch") + add_index("changes", "revision") + add_index("changes", "author") + add_index("changes", "category") + add_index("changes", "when_timestamp") + + add_index("change_files", "changeid") + add_index("change_links", "changeid") + add_index("change_properties", "changeid") + + # schedulers already has an index + + add_index("scheduler_changes", "schedulerid") + add_index("scheduler_changes", "changeid") + + add_index("scheduler_upstream_buildsets", "buildsetid") + add_index("scheduler_upstream_buildsets", "schedulerid") + add_index("scheduler_upstream_buildsets", "active") + + # sourcestamps are only queried by id, no need for additional indexes + + add_index("sourcestamp_changes", "sourcestampid") diff --git a/master/buildbot/db/schema/v6.py b/master/buildbot/db/migrate/versions/006_drop_last_access.py similarity index 73% rename from master/buildbot/db/schema/v6.py rename to master/buildbot/db/migrate/versions/006_drop_last_access.py index 9beaba1e631..fc8077148cf 100644 --- a/master/buildbot/db/schema/v6.py +++ b/master/buildbot/db/migrate/versions/006_drop_last_access.py @@ -13,11 +13,11 @@ # # Copyright Buildbot Team Members -from buildbot.db.schema import base +import sqlalchemy as sa -class Upgrader(base.Upgrader): - def upgrade(self): - cursor = self.conn.cursor() - cursor.execute("DROP table last_access") - cursor.execute("""UPDATE version set version = 6 where version = 5""") +def upgrade(migrate_engine): + metadata = sa.MetaData() + metadata.bind = migrate_engine + table = sa.Table('last_access', metadata, autoload=True) + table.drop() diff --git a/master/buildbot/db/schema/__init__.py b/master/buildbot/db/migrate/versions/__init__.py old mode 100644 new mode 100755 similarity index 100% rename from master/buildbot/db/schema/__init__.py rename to master/buildbot/db/migrate/versions/__init__.py diff --git a/master/buildbot/db/model.py b/master/buildbot/db/model.py new file mode 100644 index 00000000000..59aef4fd6da --- /dev/null +++ b/master/buildbot/db/model.py @@ -0,0 +1,405 @@ +# This file is part of Buildbot. Buildbot is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+#
+# Copyright Buildbot Team Members
+
+"""
+Storage for the database model (schema)
+"""
+
+import sqlalchemy as sa
+import migrate
+import migrate.versioning.schema
+import migrate.versioning.repository
+import migrate.versioning.exceptions
+from twisted.python import util, log
+from buildbot.db import base
+
+class Model(base.DBConnectorComponent):
+    """
+    DBConnector component to handle the database model; an instance is available
+    at C{master.db.model}.
+
+    This class has attributes for each defined table, as well as methods to
+    handle schema migration (using sqlalchemy-migrate). View the source to see
+    the table definitions.
+
+    Note that the Buildbot metadata is never bound to an engine, since that might
+    lead users to execute queries outside of the thread pool.
+    """
+
+    #
+    # schema
+    #
+
+    metadata = sa.MetaData()
+
+    # NOTES
+    #
+    # * always use default=sa.DefaultClause(..) instead of default=.., so that we can add
+    #   non-null columns with a server-side default value.
+
+    # build requests
+
+    buildrequests = sa.Table('buildrequests', metadata,
+        sa.Column('id', sa.Integer, primary_key=True),
+        sa.Column('buildsetid', sa.Integer, sa.ForeignKey("buildsets.id"), nullable=False),
+        sa.Column('buildername', sa.String(length=None), nullable=False),
+        sa.Column('priority', sa.Integer, nullable=False, server_default=sa.DefaultClause("0")),
+
+        # claimed_at is the time at which a master most recently asserted that
+        # it is responsible for running the build: this will be updated
+        # periodically to maintain the claim. Note that 0 and NULL mean the
+        # same thing here (and not 1969!)
+        sa.Column('claimed_at', sa.Integer, server_default=sa.DefaultClause("0")), # TODO: timestamp
+
+        # claimed_by indicates which buildmaster has claimed this request. The
+        # 'name' contains hostname/basedir, and will be the same for subsequent
+        # runs of any given buildmaster. The 'incarnation' contains bootime/pid,
+        # and will be different for subsequent runs. This allows each buildmaster
+        # to distinguish their current claims, their old claims, and the claims
+        # of other buildmasters, to treat them each appropriately.
+        sa.Column('claimed_by_name', sa.String(length=None), server_default=sa.DefaultClause("NULL")),
+        sa.Column('claimed_by_incarnation', sa.String(length=None), server_default=sa.DefaultClause("NULL")),
+
+        # if this is zero, then the build is still pending
+        sa.Column('complete', sa.Integer, server_default=sa.DefaultClause("0")), # TODO: boolean
+
+        # results is only valid when complete == 1; 0 = SUCCESS, 1 = WARNINGS,
+        # etc - see master/buildbot/status/builder.py
+        sa.Column('results', sa.SmallInteger),
+
+        # time the buildrequest was created
+        sa.Column('submitted_at', sa.Integer, nullable=False), # TODO: timestamp
+
+        # time the buildrequest was completed, or NULL
+        sa.Column('complete_at', sa.Integer), # TODO: timestamp
+    )
+    """A BuildRequest is a request for a particular build to be performed.
+    Each BuildRequest is a part of a BuildSet. BuildRequests are claimed by
+    masters, to avoid multiple masters running the same build."""
+
+    # builds
+
+    builds = sa.Table('builds', metadata,
+        sa.Column('id', sa.Integer, primary_key=True),
+
+        # XXX
+        # the build number is local to the builder and (maybe?) the buildmaster
+        sa.Column('number', sa.Integer, nullable=False),
+
+        sa.Column('brid', sa.Integer, sa.ForeignKey('buildrequests.id'), nullable=False),
+        sa.Column('start_time', sa.Integer, nullable=False),
+        sa.Column('finish_time', sa.Integer),
+    )
+    """This table contains basic information about each build. Note that most data
+    about a build is still stored in on-disk pickles."""
+
+    # buildsets
+
+    buildset_properties = sa.Table('buildset_properties', metadata,
+        sa.Column('buildsetid', sa.Integer, sa.ForeignKey('buildsets.id'), nullable=False),
+        sa.Column('property_name', sa.String(256), nullable=False),
+        # JSON-encoded property value
+        sa.Column('property_value', sa.String(1024), nullable=False), # TODO: too short?
+    )
+    """This table contains input properties for buildsets"""
+
+    buildsets = sa.Table('buildsets', metadata,
+        sa.Column('id', sa.Integer, primary_key=True),
+        sa.Column('external_idstring', sa.String(256)), # TODO: what is this??
+        sa.Column('reason', sa.String(256)), # TODO: sa.Text
+        sa.Column('sourcestampid', sa.Integer, sa.ForeignKey('sourcestamps.id'), nullable=False),
+        sa.Column('submitted_at', sa.Integer, nullable=False), # TODO: timestamp (or redundant?)
+
+        # if this is zero, then the build set is still pending
+        sa.Column('complete', sa.SmallInteger, nullable=False, server_default=sa.DefaultClause("0")), # TODO: redundant
+        sa.Column('complete_at', sa.Integer), # TODO: timestamp (or redundant?)
+
+        # results is only valid when complete == 1; 0 = SUCCESS, 1 = WARNINGS,
+        # etc - see master/buildbot/status/builder.py
+        sa.Column('results', sa.SmallInteger), # TODO: synthesize from buildrequests
+    )
+    """This table represents BuildSets - sets of BuildRequests that share the same
+    original cause and source information."""
+
+    # changes
+
+    change_files = sa.Table('change_files', metadata,
+        sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False),
+        sa.Column('filename', sa.String(1024), nullable=False), # TODO: sa.Text
+    )
+    """Files touched in changes"""
+
+    change_links = sa.Table('change_links', metadata,
+        sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False),
+        sa.Column('link', sa.String(1024), nullable=False), # TODO: sa.Text
+    )
+    """Links (URLs) for changes"""
+
+    change_properties = sa.Table('change_properties', metadata,
+        sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False),
+        sa.Column('property_name', sa.String(256), nullable=False),
+        # JSON-encoded property value
+        sa.Column('property_value', sa.String(1024), nullable=False), # TODO: too short?
+    )
+    """Properties for changes"""
+
+    changes = sa.Table('changes', metadata,
+        # changeid also serves as 'change number'
+        sa.Column('changeid', sa.Integer, primary_key=True), # TODO: rename to 'id'
+
+        # author's name (usually an email address)
+        sa.Column('author', sa.String(1024), nullable=False),
+
+        # commit comment
+        sa.Column('comments', sa.String(1024), nullable=False), # TODO: too short?
+
+        # old, CVS-related boolean
+        sa.Column('is_dir', sa.SmallInteger, nullable=False), # old, for CVS
+
+        # The branch where this change occurred. When branch is NULL, that
+        # means the main branch (trunk, master, etc.)
+        sa.Column('branch', sa.String(1024)),
+
+        # revision identifier for this change
+        sa.Column('revision', sa.String(256)), # CVS uses NULL
+
+        # ?? 
(TODO) + sa.Column('revlink', sa.String(256)), + + # this is the timestamp of the change - it is usually copied from the + # version-control system, and may be long in the past or even in the + # future! + sa.Column('when_timestamp', sa.Integer, nullable=False), + + # an arbitrary string used for filtering changes + sa.Column('category', sa.String(256)), + + # repository specifies, along with revision and branch, the + # source tree in which this change was detected. + sa.Column('repository', sa.Text, nullable=False, server_default=''), + + # project names the project this source code represents. It is used + # later to filter changes + sa.Column('project', sa.Text, nullable=False, server_default=''), + ) + """Changes to the source code, produced by ChangeSources""" + + # sourcestamps + + patches = sa.Table('patches', metadata, + sa.Column('id', sa.Integer, primary_key=True), + + # number of directory levels to strip off (patch -pN) + sa.Column('patchlevel', sa.Integer, nullable=False), + + # base64-encoded version of the patch file + sa.Column('patch_base64', sa.Text, nullable=False), + + # subdirectory in which the patch should be applied; NULL for top-level + sa.Column('subdir', sa.Text), + ) + """Patches for SourceStamps that were generated through the try mechanism""" + + sourcestamp_changes = sa.Table('sourcestamp_changes', metadata, + sa.Column('sourcestampid', sa.Integer, sa.ForeignKey('sourcestamps.id'), nullable=False), + sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False), + ) + """The changes that led up to a particular source stamp.""" + # TODO: changes should be the result of the difference of two sourcestamps! + + sourcestamps = sa.Table('sourcestamps', metadata, + sa.Column('id', sa.Integer, primary_key=True), + + # the branch to check out. When branch is NULL, that means + # the main branch (trunk, master, etc.) + sa.Column('branch', sa.String(256), server_default=sa.DefaultClause("NULL")), + + # the revision to check out, or the latest if NULL + sa.Column('revision', sa.String(256), server_default=sa.DefaultClause("NULL")), + + # the patch to apply to generate this source code + sa.Column('patchid', sa.Integer, sa.ForeignKey('patches.id'), server_default=sa.DefaultClause("NULL")), + + # the repository from which this source should be checked out + sa.Column('repository', sa.Text(length=None), nullable=False, server_default=''), + + # the project this source code represents + sa.Column('project', sa.Text(length=None), nullable=False, server_default=''), + ) + """A sourcestamp identifies a particular instance of the source code. + Ideally, this would always be absolute, but in practice source stamps can + also mean "latest" (when revision is NULL), which is of course a + time-dependent definition.""" + + # schedulers + + scheduler_changes = sa.Table('scheduler_changes', metadata, + sa.Column('schedulerid', sa.Integer, sa.ForeignKey('schedulers.schedulerid')), + sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid')), + # true if this change is important to this scheduler + sa.Column('important', sa.SmallInteger), # TODO: Boolean + ) + """This table references "classified" changes that have not yet been "processed". + That is, the scheduler has looked at these changes and determined that + something should be done, but that hasn't happened yet. 
Rows are deleted + from this table as soon as the scheduler is done with the change.""" + + scheduler_upstream_buildsets = sa.Table('scheduler_upstream_buildsets', metadata, + sa.Column('buildsetid', sa.Integer, sa.ForeignKey('buildsets.id')), + sa.Column('schedulerid', sa.Integer, sa.ForeignKey('schedulers.schedulerid')), + # true if this buildset is still active + sa.Column('active', sa.SmallInteger), # TODO: redundant + ) + """This table references buildsets in which a particular scheduler is + interested. On every run, a scheduler checks its upstream buildsets for + completion and reacts accordingly. Records are never deleted from this + table, but active is set to 0 when the record is no longer necessary.""" + # TODO: delete records eventually + + schedulers = sa.Table("schedulers", metadata, + # unique ID for scheduler + sa.Column('schedulerid', sa.Integer, primary_key=True), # TODO: rename to id + # scheduler's name in master.cfg + sa.Column('name', sa.String(128), nullable=False), + # JSON-encoded state for this scheduler + sa.Column('state', sa.String(1024), nullable=False), + # scheduler's class name, basically representing a "type" for the state + sa.Column('class_name', sa.String(128), nullable=False), + ) + """This table records the "state" for each scheduler. This state is, at least, + the last change that was analyzed, but is stored in an opaque JSON object. + Note that schedulers are never deleted.""" + # TODO: delete records eventually + + # indexes + + sa.Index('name_and_class', schedulers.c.name, schedulers.c.class_name) + sa.Index('buildrequests_buildsetid', buildrequests.c.buildsetid) + sa.Index('buildrequests_buildername', buildrequests.c.buildername) + sa.Index('buildrequests_complete', buildrequests.c.complete) + sa.Index('buildrequests_claimed_at', buildrequests.c.claimed_at) + sa.Index('buildrequests_claimed_by_name', buildrequests.c.claimed_by_name) + sa.Index('builds_number', builds.c.number) + sa.Index('builds_brid', builds.c.brid) + sa.Index('buildsets_complete', buildsets.c.complete) + sa.Index('buildsets_submitted_at', buildsets.c.submitted_at) + sa.Index('buildset_properties_buildsetid', buildset_properties.c.buildsetid) + sa.Index('changes_branch', changes.c.branch) + sa.Index('changes_revision', changes.c.revision) + sa.Index('changes_author', changes.c.author) + sa.Index('changes_category', changes.c.category) + sa.Index('changes_when_timestamp', changes.c.when_timestamp) + sa.Index('change_files_changeid', change_files.c.changeid) + sa.Index('change_links_changeid', change_links.c.changeid) + sa.Index('change_properties_changeid', change_properties.c.changeid) + sa.Index('scheduler_changes_schedulerid', scheduler_changes.c.schedulerid) + sa.Index('scheduler_changes_changeid', scheduler_changes.c.changeid) + sa.Index('scheduler_upstream_buildsets_buildsetid', scheduler_upstream_buildsets.c.buildsetid) + sa.Index('scheduler_upstream_buildsets_schedulerid', scheduler_upstream_buildsets.c.schedulerid) + sa.Index('scheduler_upstream_buildsets_active', scheduler_upstream_buildsets.c.active) + sa.Index('sourcestamp_changes_sourcestampid', sourcestamp_changes.c.sourcestampid) + + # + # migration support + # + + # this is a bit more complicated than might be expected because the first + # seven database versions were once implemented using a homespun migration + # system, and we need to support upgrading masters from that system. 
The
+    # old system used a 'version' table, where SQLAlchemy-Migrate uses
+    # 'migrate_version'
+
+    repo_path = util.sibpath(__file__, "migrate")
+    "path to the SQLAlchemy-Migrate 'repository'"
+
+    def is_current(self):
+        """Returns true (via deferred) if the database's version is up to date."""
+        def thd(engine):
+            # we don't even have to look at the old version table - if there's
+            # no migrate_version, then we're not up to date.
+            repo = migrate.versioning.repository.Repository(self.repo_path)
+            repo_version = repo.latest
+            try:
+                # migrate.api doesn't let us hand in an engine
+                schema = migrate.versioning.schema.ControlledSchema(engine, self.repo_path)
+                db_version = schema.version
+            except migrate.versioning.exceptions.DatabaseNotControlledError:
+                return False
+
+            return db_version == repo_version
+        return self.connector.pool.do_with_engine(thd)
+
+    def upgrade(self):
+        """Upgrade the database to the most recent schema version, returning a
+        deferred."""
+
+        # here, things are a little tricky. If we have a 'version' table, then
+        # we need to version_control the database with the proper version
+        # number, drop 'version', and then upgrade. If we have no 'version'
+        # table and no 'migrate_version' table, then we need to version_control
+        # the database. Otherwise, we just need to upgrade it.
+
+        def table_exists(engine, tbl):
+            try:
+                r = engine.execute("select * from %s limit 1" % tbl)
+                r.close()
+                return True
+            except:
+                return False
+
+        # due to http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=100, we cannot
+        # use the migrate.versioning.api module. So these methods perform similar wrapping
+        # functions to what is done by the API functions, but without disposing of the engine.
+        def upgrade(engine):
+            schema = migrate.versioning.schema.ControlledSchema(engine, self.repo_path)
+            changeset = schema.changeset(None)
+            for version, change in changeset:
+                log.msg('migrating schema version %s -> %d'
+                        % (version, version + 1))
+                schema.runchange(version, change, 1)
+
+        def version_control(engine, version=None):
+            migrate.versioning.schema.ControlledSchema.create(engine, self.repo_path, version)
+
+        # the upgrade process must run in a db thread
+        def thd(engine):
+            # if the migrate_version table exists, we can just let migrate
+            # take care of this process.
+            if table_exists(engine, 'migrate_version'):
+                upgrade(engine)
+
+            # if the version table exists, then we can version_control things
+            # at that version, drop the version table, and let migrate take
+            # care of the rest.
+            elif table_exists(engine, 'version'):
+                # get the existing version
+                r = engine.execute("select version from version limit 1")
+                old_version = r.scalar()
+
+                # set up migrate at the same version
+                version_control(engine, old_version)
+
+                # drop the no-longer-required version table (via raw SQL:
+                # Engine.drop expects a schema object, not a table name)
+                engine.execute("drop table version")
+
+                # and, finally, upgrade using migrate
+                upgrade(engine)
+
+            # otherwise, this db is uncontrolled, so we just version control it
+            # and update it.
+            else:
+                version_control(engine)
+                upgrade(engine)
+        return self.connector.pool.do_with_engine(thd)
diff --git a/master/buildbot/db/pool.py b/master/buildbot/db/pool.py
index ede15680076..cbd8e28c49f 100644
--- a/master/buildbot/db/pool.py
+++ b/master/buildbot/db/pool.py
@@ -50,7 +50,19 @@ def thd():
                 return rv
         return threads.deferToThreadPool(reactor, self, thd)
 
-    # older implementation for twisted < 0.8.2
+    def do_with_engine(self, callable, *args, **kwargs):
+        """
+        Like L{do}, but with an SQLAlchemy Engine as the first argument
+        """
+        def thd():
+            conn = self.engine
+            rv = callable(conn, *args, **kwargs)
+            assert not isinstance(rv, engine.ResultProxy), \
+                    "do not return ResultProxy objects!"
+            return rv
+        return threads.deferToThreadPool(reactor, self, thd)
+
+    # older implementations for twisted < 8.2.0
     def do_081(self, callable, *args, **kwargs):
         d = defer.Deferred()
         def thd():
@@ -64,5 +76,19 @@ def thd():
             reactor.callFromThread(d.errback, failure.Failure())
         self.callInThread(thd)
         return d
+    def do_with_engine_081(self, callable, *args, **kwargs):
+        d = defer.Deferred()
+        def thd():
+            try:
+                conn = self.engine
+                rv = callable(conn, *args, **kwargs)
+                assert not isinstance(rv, engine.ResultProxy), \
+                        "do not return ResultProxy objects!"
+                reactor.callFromThread(d.callback, rv)
+            except:
+                reactor.callFromThread(d.errback, failure.Failure())
+        self.callInThread(thd)
+        return d
 if twisted.version < versions.Version('twisted', 8, 2, 0):
     do = do_081
+    do_with_engine = do_with_engine_081
diff --git a/master/buildbot/db/schema/manager.py b/master/buildbot/db/schema/manager.py
deleted file mode 100644
index a58679f37c4..00000000000
--- a/master/buildbot/db/schema/manager.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# This file is part of Buildbot. Buildbot is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Buildbot Team Members
-
-from twisted.python import reflect
-
-# note that schema modules are not loaded unless an upgrade is taking place
-
-CURRENT_VERSION = 6
-
-class DBSchemaManager(object):
-    """
-    This class is responsible for managing the database schema and upgrading it
-    as necessary. This includes both the *actual* database and the old pickle
-    database, as migrations move data between the two.
-
-    Note that this class is *entirely synchronous*! Performing any other operations
-    while changing the schema is just asking for trouble.
-    """
-    def __init__(self, spec, basedir):
-        self.spec = spec
-        self.basedir = basedir
-        self.dbapi = self.spec.get_dbapi()
-
-    def get_db_version(self, conn=None):
-        """
-        Get the current schema version for this database
-        """
-        close_conn = False
-        if not conn:
-            conn = self.spec.get_sync_connection()
-            close_conn = True
-        c = conn.cursor()
-        try:
-            try:
-                c.execute("SELECT version FROM version")
-                rows = c.fetchall()
-                assert len(rows) == 1, "%i rows in version table!
(should only be 1)" % len(rows) - return rows[0][0] - except (self.dbapi.OperationalError, self.dbapi.ProgrammingError): - # no version table = version 0 - return 0 - finally: - if close_conn: - conn.close() - - def get_current_version(self): - """ - Get the current db version for this release of buildbot - """ - return CURRENT_VERSION - - def is_current(self): - """ - Is this database current? - """ - return self.get_db_version() == self.get_current_version() - - def upgrade(self, quiet=False): - """ - Upgrade this database to the current version - """ - conn = self.spec.get_sync_connection() - try: - while self.get_db_version() < self.get_current_version(): - next_version = self.get_db_version() + 1 - next_version_module = reflect.namedModule("buildbot.db.schema.v%d" % next_version) - upg = next_version_module.Upgrader(self.dbapi, conn, self.basedir, quiet) - upg.upgrade() - conn.commit() - assert self.get_db_version() == next_version - finally: - conn.close() diff --git a/master/buildbot/db/schema/tables.sql b/master/buildbot/db/schema/tables.sql deleted file mode 100644 index ba768d63a9a..00000000000 --- a/master/buildbot/db/schema/tables.sql +++ /dev/null @@ -1,181 +0,0 @@ -CREATE TABLE buildrequests ( - `id` INTEGER PRIMARY KEY AUTO_INCREMENT, - - -- every BuildRequest has a BuildSet - -- the sourcestampid and reason live in the BuildSet - `buildsetid` INTEGER NOT NULL, - - `buildername` VARCHAR(256) NOT NULL, - - `priority` INTEGER NOT NULL default 0, - - -- claimed_at is the time at which a master most recently asserted that - -- it is responsible for running the build: this will be updated - -- periodically to maintain the claim - `claimed_at` INTEGER default 0, - - -- claimed_by indicates which buildmaster has claimed this request. The - -- 'name' contains hostname/basedir, and will be the same for subsequent - -- runs of any given buildmaster. The 'incarnation' contains bootime/pid, - -- and will be different for subsequent runs. This allows each buildmaster - -- to distinguish their current claims, their old claims, and the claims - -- of other buildmasters, to treat them each appropriately. - `claimed_by_name` VARCHAR(256) default NULL, - `claimed_by_incarnation` VARCHAR(256) default NULL, - - `complete` INTEGER default 0, -- complete=0 means 'pending' - - -- results is only valid when complete==1 - `results` SMALLINT, -- 0=SUCCESS,1=WARNINGS,etc, from status/builder.py - - `submitted_at` INTEGER NOT NULL, - - `complete_at` INTEGER -); -CREATE TABLE builds ( - `id` INTEGER PRIMARY KEY AUTO_INCREMENT, - `number` INTEGER NOT NULL, -- BuilderStatus.getBuild(number) - -- 'number' is scoped to both the local buildmaster and the buildername - `brid` INTEGER NOT NULL, -- matches buildrequests.id - `start_time` INTEGER NOT NULL, - `finish_time` INTEGER -); -CREATE TABLE buildset_properties ( - `buildsetid` INTEGER NOT NULL, - `property_name` VARCHAR(256) NOT NULL, - `property_value` VARCHAR(1024) NOT NULL -- too short? 
-); -CREATE TABLE buildsets ( - `id` INTEGER PRIMARY KEY AUTO_INCREMENT, - `external_idstring` VARCHAR(256), - `reason` VARCHAR(256), - `sourcestampid` INTEGER NOT NULL, - `submitted_at` INTEGER NOT NULL, - `complete` SMALLINT NOT NULL default 0, - `complete_at` INTEGER, - `results` SMALLINT -- 0=SUCCESS,2=FAILURE, from status/builder.py - -- results is NULL until complete==1 -); -CREATE TABLE change_files ( - `changeid` INTEGER NOT NULL, - `filename` VARCHAR(1024) NOT NULL -); -CREATE TABLE change_links ( - `changeid` INTEGER NOT NULL, - `link` VARCHAR(1024) NOT NULL -); -CREATE TABLE change_properties ( - `changeid` INTEGER NOT NULL, - `property_name` VARCHAR(256) NOT NULL, - `property_value` VARCHAR(1024) NOT NULL -- too short? -); -CREATE TABLE changes ( - `changeid` INTEGER PRIMARY KEY AUTO_INCREMENT, -- also serves as 'change number' - `author` VARCHAR(1024) NOT NULL, - `comments` VARCHAR(1024) NOT NULL, -- too short? - `is_dir` SMALLINT NOT NULL, -- old, for CVS - `branch` VARCHAR(1024) NULL, - `revision` VARCHAR(256), -- CVS uses NULL. too short for darcs? - `revlink` VARCHAR(256) NULL, - `when_timestamp` INTEGER NOT NULL, -- copied from incoming Change - `category` VARCHAR(256) NULL, - - -- repository specifies, along with revision and branch, the - -- source tree in which this change was detected. - `repository` text not null default '', - - -- project names the project this source code represents. It is used - -- later to filter changes - `project` text not null default '' -); - -CREATE TABLE patches ( - `id` INTEGER PRIMARY KEY AUTO_INCREMENT, - `patchlevel` INTEGER NOT NULL, - `patch_base64` TEXT NOT NULL, -- encoded bytestring - `subdir` TEXT -- usually NULL -); -CREATE TABLE sourcestamp_changes ( - `sourcestampid` INTEGER NOT NULL, - `changeid` INTEGER NOT NULL -); -CREATE TABLE sourcestamps ( - `id` INTEGER PRIMARY KEY AUTO_INCREMENT, - `branch` VARCHAR(256) default NULL, - `revision` VARCHAR(256) default NULL, - `patchid` INTEGER default NULL, - `repository` TEXT not null default '', - `project` TEXT not null default '' -); - --- --- Scheduler Tables --- - --- This table records the "state" for each scheduler. This state is, at least, --- the last change that was analyzed, but is stored in an opaque JSON object. --- Note that schedulers are never deleted. -CREATE TABLE schedulers ( - `schedulerid` INTEGER PRIMARY KEY AUTO_INCREMENT, -- joins to other tables - `name` VARCHAR(128) NOT NULL, -- the scheduler's name according to master.cfg - `class_name` VARCHAR(128) NOT NULL, -- the scheduler's class - `state` VARCHAR(1024) NOT NULL -- JSON-encoded state dictionary -); -CREATE UNIQUE INDEX `name_and_class` ON schedulers (`name`, `class_name`); - - --- This stores "classified" changes that have not yet been "processed". That --- is, the scheduler has looked at these changes and determined that something --- should be done, but that hasn't happened yet. Rows are "retired" from this --- table as soon as the scheduler is done with the change. -CREATE TABLE scheduler_changes ( - `schedulerid` INTEGER, - `changeid` INTEGER, - `important` SMALLINT -); - --- This stores buildsets in which a particular scheduler is interested. --- On every run, a scheduler checks its upstream buildsets for completion --- and reacts accordingly. Records are never deleted from this table, but --- active is set to 0 when the record is no longer necessary. 
-CREATE TABLE scheduler_upstream_buildsets ( - `buildsetid` INTEGER, - `schedulerid` INTEGER, - `active` SMALLINT -); - --- --- Schema Information --- - --- database version; each upgrade script should change this -CREATE TABLE version ( - version INTEGER NOT NULL -); - -CREATE INDEX `buildrequests_buildsetid` ON `buildrequests` (`buildsetid`); -CREATE INDEX `buildrequests_buildername` ON `buildrequests` (`buildername` (255)); -CREATE INDEX `buildrequests_complete` ON `buildrequests` (`complete`); -CREATE INDEX `buildrequests_claimed_at` ON `buildrequests` (`claimed_at`); -CREATE INDEX `buildrequests_claimed_by_name` ON `buildrequests` (`claimed_by_name` (255)); -CREATE INDEX `builds_number` ON `builds` (`number`); -CREATE INDEX `builds_brid` ON `builds` (`brid`); -CREATE INDEX `buildsets_complete` ON `buildsets` (`complete`); -CREATE INDEX `buildsets_submitted_at` ON `buildsets` (`submitted_at`); -CREATE INDEX `buildset_properties_buildsetid` ON `buildset_properties` (`buildsetid`); -CREATE INDEX `changes_branch` ON `changes` (`branch` (255)); -CREATE INDEX `changes_revision` ON `changes` (`revision` (255)); -CREATE INDEX `changes_author` ON `changes` (`author` (255)); -CREATE INDEX `changes_category` ON `changes` (`category` (255)); -CREATE INDEX `changes_when_timestamp` ON `changes` (`when_timestamp`); -CREATE INDEX `change_files_changeid` ON `change_files` (`changeid`); -CREATE INDEX `change_links_changeid` ON `change_links` (`changeid`); -CREATE INDEX `change_properties_changeid` ON `change_properties` (`changeid`); -CREATE INDEX `scheduler_changes_schedulerid` ON `scheduler_changes` (`schedulerid`); -CREATE INDEX `scheduler_changes_changeid` ON `scheduler_changes` (`changeid`); -CREATE INDEX `scheduler_upstream_buildsets_buildsetid` ON `scheduler_upstream_buildsets` (`buildsetid`); -CREATE INDEX `scheduler_upstream_buildsets_schedulerid` ON `scheduler_upstream_buildsets` (`schedulerid`); -CREATE INDEX `scheduler_upstream_buildsets_active` ON `scheduler_upstream_buildsets` (`active`); -CREATE INDEX `sourcestamp_changes_sourcestampid` ON `sourcestamp_changes` (`sourcestampid`); - -INSERT INTO version VALUES(5); diff --git a/master/buildbot/db/schema/v1.py b/master/buildbot/db/schema/v1.py deleted file mode 100644 index 3bafbdc3180..00000000000 --- a/master/buildbot/db/schema/v1.py +++ /dev/null @@ -1,349 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Copyright Buildbot Team Members - -import cPickle -import textwrap -import os -import sys - -from twisted.persisted import styles - -from buildbot.db import util -from buildbot.db.schema import base -from buildbot.util import json - -# This is version 1, so it introduces a lot of new tables over version 0, -# which had no database. 
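(The v1-v5 upgrader modules being deleted here all follow one convention: a
one-row 'version' table records the schema version, and each vN.py bumps it by
exactly one. A condensed, hypothetical restatement of the loop that drove them
-- the real code is the DBSchemaManager.upgrade() removed above, and the
helper name and parameters below are illustrative, not part of the patch:

    from twisted.python import reflect

    CURRENT_VERSION = 6  # per the deleted manager.py

    def old_style_upgrade(spec, basedir, dbapi, get_db_version, quiet=True):
        # apply v1, v2, ... upgraders in order until the db reports current
        conn = spec.get_sync_connection()
        try:
            while get_db_version(conn) < CURRENT_VERSION:
                next_version = get_db_version(conn) + 1
                mod = reflect.namedModule("buildbot.db.schema.v%d" % next_version)
                mod.Upgrader(dbapi, conn, basedir, quiet).upgrade()
                conn.commit()
        finally:
            conn.close()
)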
- -TABLES = [ - # the schema here is defined as version 1 - textwrap.dedent(""" - CREATE TABLE version ( - version INTEGER NOT NULL -- contains one row, currently set to 1 - ); - """), - - # last_access is used for logging, to record the last time that each - # client (or rather class of clients) touched the DB. The idea is that if - # something gets weird, you can check this and discover that you have an - # older tool (which uses a different schema) mucking things up. - textwrap.dedent(""" - CREATE TABLE last_access ( - `who` VARCHAR(256) NOT NULL, -- like 'buildbot-0.8.0' - `writing` INTEGER NOT NULL, -- 1 if you are writing, 0 if you are reading - -- PRIMARY KEY (who, writing), - `last_access` TIMESTAMP -- seconds since epoch - ); - """), - - textwrap.dedent(""" - CREATE TABLE changes_nextid (next_changeid INTEGER); - """), - - textwrap.dedent(""" - -- Changes are immutable: once added, never changed - CREATE TABLE changes ( - `changeid` INTEGER PRIMARY KEY NOT NULL, -- also serves as 'change number' - `author` VARCHAR(1024) NOT NULL, - `comments` VARCHAR(1024) NOT NULL, -- too short? - `is_dir` SMALLINT NOT NULL, -- old, for CVS - `branch` VARCHAR(1024) NULL, - `revision` VARCHAR(256), -- CVS uses NULL. too short for darcs? - `revlink` VARCHAR(256) NULL, - `when_timestamp` INTEGER NOT NULL, -- copied from incoming Change - `category` VARCHAR(256) NULL - ); - """), - - textwrap.dedent(""" - CREATE TABLE change_links ( - `changeid` INTEGER NOT NULL, - `link` VARCHAR(1024) NOT NULL - ); - """), - - textwrap.dedent(""" - CREATE TABLE change_files ( - `changeid` INTEGER NOT NULL, - `filename` VARCHAR(1024) NOT NULL - ); - """), - - textwrap.dedent(""" - CREATE TABLE change_properties ( - `changeid` INTEGER NOT NULL, - `property_name` VARCHAR(256) NOT NULL, - `property_value` VARCHAR(1024) NOT NULL -- too short? - ); - """), - - # Scheduler tables - textwrap.dedent(""" - CREATE TABLE schedulers ( - `schedulerid` INTEGER PRIMARY KEY, -- joins to other tables - `name` VARCHAR(127) UNIQUE NOT NULL, - `state` VARCHAR(1024) NOT NULL -- JSON-encoded state dictionary - ); - """), - - textwrap.dedent(""" - CREATE TABLE scheduler_changes ( - `schedulerid` INTEGER, - `changeid` INTEGER, - `important` SMALLINT - ); - """), - - textwrap.dedent(""" - CREATE TABLE scheduler_upstream_buildsets ( - `buildsetid` INTEGER, - `schedulerid` INTEGER, - `active` SMALLINT - ); - """), - - # SourceStamps - textwrap.dedent(""" - -- SourceStamps are immutable: once added, never changed - CREATE TABLE sourcestamps ( - `id` INTEGER PRIMARY KEY, - `branch` VARCHAR(256) default NULL, - `revision` VARCHAR(256) default NULL, - `patchid` INTEGER default NULL - ); - """), - textwrap.dedent(""" - CREATE TABLE patches ( - `id` INTEGER PRIMARY KEY, - `patchlevel` INTEGER NOT NULL, - `patch_base64` TEXT NOT NULL, -- encoded bytestring - `subdir` TEXT -- usually NULL - ); - """), - textwrap.dedent(""" - CREATE TABLE sourcestamp_changes ( - `sourcestampid` INTEGER NOT NULL, - `changeid` INTEGER NOT NULL - ); - """), - - # BuildRequests - textwrap.dedent(""" - -- BuildSets are mutable. Python code may not cache them. Every - -- BuildRequest must have exactly one associated BuildSet. 
- CREATE TABLE buildsets ( - `id` INTEGER PRIMARY KEY NOT NULL, - `external_idstring` VARCHAR(256), - `reason` VARCHAR(256), - `sourcestampid` INTEGER NOT NULL, - `submitted_at` INTEGER NOT NULL, - `complete` SMALLINT NOT NULL default 0, - `complete_at` INTEGER, - `results` SMALLINT -- 0=SUCCESS,2=FAILURE, from status/builder.py - -- results is NULL until complete==1 - ); - """), - textwrap.dedent(""" - CREATE TABLE buildset_properties ( - `buildsetid` INTEGER NOT NULL, - `property_name` VARCHAR(256) NOT NULL, - `property_value` VARCHAR(1024) NOT NULL -- too short? - ); - """), - - textwrap.dedent(""" - -- the buildrequests table represents the queue of builds that need to be - -- done. In an idle buildbot, all requests will have complete=1. - -- BuildRequests are mutable. Python code may not cache them. - CREATE TABLE buildrequests ( - `id` INTEGER PRIMARY KEY NOT NULL, - - -- every BuildRequest has a BuildSet - -- the sourcestampid and reason live in the BuildSet - `buildsetid` INTEGER NOT NULL, - - `buildername` VARCHAR(256) NOT NULL, - - `priority` INTEGER NOT NULL default 0, - - -- claimed_at is the time at which a master most recently asserted that - -- it is responsible for running the build: this will be updated - -- periodically to maintain the claim - `claimed_at` INTEGER default 0, - - -- claimed_by indicates which buildmaster has claimed this request. The - -- 'name' contains hostname/basedir, and will be the same for subsequent - -- runs of any given buildmaster. The 'incarnation' contains bootime/pid, - -- and will be different for subsequent runs. This allows each buildmaster - -- to distinguish their current claims, their old claims, and the claims - -- of other buildmasters, to treat them each appropriately. - `claimed_by_name` VARCHAR(256) default NULL, - `claimed_by_incarnation` VARCHAR(256) default NULL, - - `complete` INTEGER default 0, -- complete=0 means 'pending' - - -- results is only valid when complete==1 - `results` SMALLINT, -- 0=SUCCESS,1=WARNINGS,etc, from status/builder.py - - `submitted_at` INTEGER NOT NULL, - - `complete_at` INTEGER - ); - """), - - textwrap.dedent(""" - -- this records which builds have been started for each request - CREATE TABLE builds ( - `id` INTEGER PRIMARY KEY NOT NULL, - `number` INTEGER NOT NULL, -- BuilderStatus.getBuild(number) - -- 'number' is scoped to both the local buildmaster and the buildername - `brid` INTEGER NOT NULL, -- matches buildrequests.id - `start_time` INTEGER NOT NULL, - `finish_time` INTEGER - ); - """), -] - -class Upgrader(base.Upgrader): - def upgrade(self): - self.test_unicode() - self.add_tables() - self.migrate_changes() - self.set_version() - - def test_unicode(self): - # first, create a test table - c = self.conn.cursor() - c.execute("CREATE TABLE test_unicode (`name` VARCHAR(100))") - q = util.sql_insert(self.dbapi, 'test_unicode', ["name"]) - try: - val = u"Frosty the \N{SNOWMAN}" - c.execute(q, [val]) - c.execute("SELECT * FROM test_unicode") - row = c.fetchall()[0] - if row[0] != val: - raise UnicodeError("Your database doesn't support unicode data; for MySQL, set the default collation to utf8_general_ci.") - finally: - pass - c.execute("DROP TABLE test_unicode") - - def add_tables(self): - # first, add all of the tables - c = self.conn.cursor() - for t in TABLES: - try: - c.execute(t) - except: - print >>sys.stderr, "error executing SQL query: %s" % t - raise - - def _addChangeToDatabase(self, change, cursor): - # strip None from any of these values, just in case - def remove_none(x): - if x is 
None: return u"" - elif isinstance(x, str): - return x.decode("utf8") - else: - return x - try: - values = tuple(remove_none(x) for x in - (change.number, change.who, - change.comments, change.isdir, - change.branch, change.revision, change.revlink, - change.when, change.category)) - except UnicodeDecodeError, e: - raise UnicodeError("Trying to import change data as UTF-8 failed. Please look at contrib/fix_changes_pickle_encoding.py: %s" % str(e)) - - q = util.sql_insert(self.dbapi, 'changes', - """changeid author comments is_dir branch revision - revlink when_timestamp category""".split()) - cursor.execute(q, values) - - for link in change.links: - cursor.execute(util.sql_insert(self.dbapi, 'change_links', ('changeid', 'link')), - (change.number, link)) - - # sometimes change.files contains nested lists -- why, I do not know! But we deal with - # it all the same - see bug #915. We'll assume for now that change.files contains *either* - # lists of filenames or plain filenames, not both. - def flatten(l): - if l and type(l[0]) == list: - rv = [] - for e in l: - if type(e) == list: - rv.extend(e) - else: - rv.append(e) - return rv - else: - return l - for filename in flatten(change.files): - cursor.execute(util.sql_insert(self.dbapi, 'change_files', ('changeid', 'filename')), - (change.number, filename)) - for propname,propvalue in change.properties.properties.items(): - encoded_value = json.dumps(propvalue) - cursor.execute(util.sql_insert(self.dbapi, 'change_properties', - ('changeid', 'property_name', 'property_value')), - (change.number, propname, encoded_value)) - - def migrate_changes(self): - # if we still have a changes.pck, then we need to migrate it - changes_pickle = os.path.join(self.basedir, "changes.pck") - if os.path.exists(changes_pickle): - if not self.quiet: print "migrating changes.pck to database" - - # 'source' will be an old b.c.changes.ChangeMaster instance, with a - # .changes attribute - source = cPickle.load(open(changes_pickle,"rb")) - styles.doUpgrade() - - if not self.quiet: print " (%d Change objects)" % len(source.changes) - - # first, scan for changes without a number. If we find any, then we'll - # renumber the changes sequentially - have_unnumbered = False - for c in source.changes: - if c.revision and c.number is None: - have_unnumbered = True - break - if have_unnumbered: - n = 1 - for c in source.changes: - if c.revision: - c.number = n - n = n + 1 - - # insert the changes - cursor = self.conn.cursor() - for c in source.changes: - if not c.revision: - continue - self._addChangeToDatabase(c, cursor) - - # update next_changeid - max_changeid = max([ c.number for c in source.changes if c.revision ] + [ 0 ]) - cursor.execute("""INSERT into changes_nextid VALUES (%d)""" % (max_changeid+1)) - - if not self.quiet: - print "moving changes.pck to changes.pck.old; delete it or keep it as a backup" - os.rename(changes_pickle, changes_pickle+".old") - else: - c = self.conn.cursor() - c.execute("""INSERT into changes_nextid VALUES (1)""") - - def set_version(self): - c = self.conn.cursor() - c.execute("""INSERT INTO version VALUES (1)""") - diff --git a/master/buildbot/db/schema/v2.py b/master/buildbot/db/schema/v2.py deleted file mode 100644 index 1ebce80ba65..00000000000 --- a/master/buildbot/db/schema/v2.py +++ /dev/null @@ -1,50 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. 
-# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Copyright Buildbot Team Members - - -from buildbot.db.schema import base - -class Upgrader(base.Upgrader): - def upgrade(self): - self.add_columns() - self.set_version() - - def add_columns(self): - if self.dbapiName == 'MySQLdb': - default_text = "" - else: - default_text = "default ''" - - cursor = self.conn.cursor() - cursor.execute(""" - ALTER TABLE changes - add column `repository` text not null %s - """ % default_text) - cursor.execute(""" - ALTER TABLE changes - add column `project` text not null %s - """ % default_text) - cursor.execute(""" - ALTER TABLE sourcestamps - add column `repository` text not null %s - """ % default_text) - cursor.execute(""" - ALTER TABLE sourcestamps - add column `project` text not null %s - """ % default_text) - - def set_version(self): - c = self.conn.cursor() - c.execute("""UPDATE version set version = 2 where version = 1""") diff --git a/master/buildbot/db/schema/v3.py b/master/buildbot/db/schema/v3.py deleted file mode 100644 index f5ed0c5770c..00000000000 --- a/master/buildbot/db/schema/v3.py +++ /dev/null @@ -1,74 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# Copyright Buildbot Team Members - -from buildbot.db.schema import base - -class Upgrader(base.Upgrader): - def upgrade(self): - self.migrate_schedulers() - self.set_version() - - def migrate_schedulers(self): - cursor = self.conn.cursor() - # If this fails, there's no cleaning up to do - cursor.execute(""" - ALTER TABLE schedulers - RENAME TO schedulers_old - """) - - try: - cursor.execute(""" - CREATE TABLE schedulers ( - `schedulerid` INTEGER PRIMARY KEY, -- joins to other tables - `name` VARCHAR(127) NOT NULL, -- the scheduler's name according to master.cfg - `class_name` VARCHAR(127) NOT NULL, -- the scheduler's class - `state` VARCHAR(1024) NOT NULL -- JSON-encoded state dictionary - ); - """) - except: - # Restore the original table - cursor.execute(""" - ALTER TABLE schedulers_old - RENAME TO schedulers - """) - raise - - try: - cursor.execute(""" - CREATE UNIQUE INDEX `name_and_class` ON - schedulers (`name`, `class_name`) - """) - - cursor.execute(""" - INSERT INTO schedulers (`schedulerid`, `name`, `state`, `class_name`) - SELECT `schedulerid`, `name`, `state`, '' FROM schedulers_old - """) - cursor.execute(""" - DROP TABLE schedulers_old - """) - except: - # Clean up the new table, and restore the original - cursor.execute(""" - DROP TABLE schedulers - """) - cursor.execute(""" - ALTER TABLE schedulers_old - RENAME TO schedulers - """) - raise - - def set_version(self): - c = self.conn.cursor() - c.execute("""UPDATE version set version = 3 where version = 2""") diff --git a/master/buildbot/db/schema/v4.py b/master/buildbot/db/schema/v4.py deleted file mode 100644 index b7d13723380..00000000000 --- a/master/buildbot/db/schema/v4.py +++ /dev/null @@ -1,224 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# Copyright Buildbot Team Members - -from buildbot.db.schema import base - -class Upgrader(base.Upgrader): - def upgrade(self): - self.migrate_buildrequests() - self.migrate_builds() - self.migrate_buildsets() - self.migrate_changes() - self.migrate_patches() - self.migrate_sourcestamps() - self.migrate_schedulers() - self.set_version() - - def makeAutoincColumn(self, name): - if self.dbapiName == 'MySQLdb': - return "`%s` INTEGER PRIMARY KEY AUTO_INCREMENT" % name - elif self.dbapiName in ('sqlite3', 'pysqlite2.dbapi2'): - return "`%s` INTEGER PRIMARY KEY AUTOINCREMENT" % name - raise ValueError("Unsupported dbapi: %s" % self.dbapiName) - - def migrate_table(self, table_name, schema): - names = { - 'old_name': "%s_old" % table_name, - 'table_name': table_name, - } - cursor = self.conn.cursor() - # If this fails, there's no cleaning up to do - cursor.execute(""" - ALTER TABLE %(table_name)s - RENAME TO %(old_name)s - """ % names) - - try: - cursor.execute(schema) - except: - # Restore the original table - cursor.execute(""" - ALTER TABLE %(old_name)s - RENAME TO %(table_name)s - """ % names) - raise - - try: - cursor.execute(""" - INSERT INTO %(table_name)s - SELECT * FROM %(old_name)s - """ % names) - cursor.execute(""" - DROP TABLE %(old_name)s - """ % names) - except: - # Clean up the new table, and restore the original - cursor.execute(""" - DROP TABLE %(table_name)s - """ % names) - cursor.execute(""" - ALTER TABLE %(old_name)s - RENAME TO %(table_name)s - """ % names) - raise - - def set_version(self): - c = self.conn.cursor() - c.execute("""UPDATE version set version = 4 where version = 3""") - - def migrate_schedulers(self): - schedulerid_col = self.makeAutoincColumn('schedulerid') - schema = """ - CREATE TABLE schedulers ( - %(schedulerid_col)s, -- joins to other tables - `name` VARCHAR(100) NOT NULL, -- the scheduler's name according to master.cfg - `class_name` VARCHAR(100) NOT NULL, -- the scheduler's class - `state` VARCHAR(1024) NOT NULL -- JSON-encoded state dictionary - ); - """ % {'schedulerid_col': schedulerid_col} - self.migrate_table('schedulers', schema) - - # Fix up indices - cursor = self.conn.cursor() - cursor.execute(""" - CREATE UNIQUE INDEX `name_and_class` ON - schedulers (`name`, `class_name`) - """) - - def migrate_builds(self): - buildid_col = self.makeAutoincColumn('id') - schema = """ - CREATE TABLE builds ( - %(buildid_col)s, - `number` INTEGER NOT NULL, -- BuilderStatus.getBuild(number) - -- 'number' is scoped to both the local buildmaster and the buildername - `brid` INTEGER NOT NULL, -- matches buildrequests.id - `start_time` INTEGER NOT NULL, - `finish_time` INTEGER - ); - """ % {'buildid_col': buildid_col} - self.migrate_table('builds', schema) - - def migrate_changes(self): - changeid_col = self.makeAutoincColumn('changeid') - schema = """ - CREATE TABLE changes ( - %(changeid_col)s, -- also serves as 'change number' - `author` VARCHAR(1024) NOT NULL, - `comments` VARCHAR(1024) NOT NULL, -- too short? - `is_dir` SMALLINT NOT NULL, -- old, for CVS - `branch` VARCHAR(1024) NULL, - `revision` VARCHAR(256), -- CVS uses NULL. too short for darcs? - `revlink` VARCHAR(256) NULL, - `when_timestamp` INTEGER NOT NULL, -- copied from incoming Change - `category` VARCHAR(256) NULL, - - -- repository specifies, along with revision and branch, the - -- source tree in which this change was detected. - `repository` TEXT NOT NULL default '', - - -- project names the project this source code represents. 
It is used - -- later to filter changes - `project` TEXT NOT NULL default '' - ); - """ % {'changeid_col': changeid_col} - self.migrate_table('changes', schema) - - # Drop changes_nextid columnt - cursor = self.conn.cursor() - cursor.execute("DROP TABLE changes_nextid") - - def migrate_buildrequests(self): - buildrequestid_col = self.makeAutoincColumn('id') - schema = """ - CREATE TABLE buildrequests ( - %(buildrequestid_col)s, - - -- every BuildRequest has a BuildSet - -- the sourcestampid and reason live in the BuildSet - `buildsetid` INTEGER NOT NULL, - - `buildername` VARCHAR(256) NOT NULL, - - `priority` INTEGER NOT NULL default 0, - - -- claimed_at is the time at which a master most recently asserted that - -- it is responsible for running the build: this will be updated - -- periodically to maintain the claim - `claimed_at` INTEGER default 0, - - -- claimed_by indicates which buildmaster has claimed this request. The - -- 'name' contains hostname/basedir, and will be the same for subsequent - -- runs of any given buildmaster. The 'incarnation' contains bootime/pid, - -- and will be different for subsequent runs. This allows each buildmaster - -- to distinguish their current claims, their old claims, and the claims - -- of other buildmasters, to treat them each appropriately. - `claimed_by_name` VARCHAR(256) default NULL, - `claimed_by_incarnation` VARCHAR(256) default NULL, - - `complete` INTEGER default 0, -- complete=0 means 'pending' - - -- results is only valid when complete==1 - `results` SMALLINT, -- 0=SUCCESS,1=WARNINGS,etc, from status/builder.py - - `submitted_at` INTEGER NOT NULL, - - `complete_at` INTEGER - ); - """ % {'buildrequestid_col': buildrequestid_col} - self.migrate_table('buildrequests', schema) - - def migrate_buildsets(self): - buildsetsid_col = self.makeAutoincColumn('id') - schema = """ - CREATE TABLE buildsets ( - %(buildsetsid_col)s, - `external_idstring` VARCHAR(256), - `reason` VARCHAR(256), - `sourcestampid` INTEGER NOT NULL, - `submitted_at` INTEGER NOT NULL, - `complete` SMALLINT NOT NULL default 0, - `complete_at` INTEGER, - `results` SMALLINT -- 0=SUCCESS,2=FAILURE, from status/builder.py - -- results is NULL until complete==1 - ); - """ % {'buildsetsid_col': buildsetsid_col} - self.migrate_table("buildsets", schema) - - def migrate_patches(self): - patchesid_col = self.makeAutoincColumn('id') - schema = """ - CREATE TABLE patches ( - %(patchesid_col)s, - `patchlevel` INTEGER NOT NULL, - `patch_base64` TEXT NOT NULL, -- encoded bytestring - `subdir` TEXT -- usually NULL - ); - """ % {'patchesid_col': patchesid_col} - self.migrate_table("patches", schema) - - def migrate_sourcestamps(self): - sourcestampsid_col = self.makeAutoincColumn('id') - schema = """ - CREATE TABLE sourcestamps ( - %(sourcestampsid_col)s, - `branch` VARCHAR(256) default NULL, - `revision` VARCHAR(256) default NULL, - `patchid` INTEGER default NULL, - `repository` TEXT not null default '', - `project` TEXT not null default '' - ); - """ % {'sourcestampsid_col': sourcestampsid_col} - self.migrate_table("sourcestamps", schema) diff --git a/master/buildbot/db/schema/v5.py b/master/buildbot/db/schema/v5.py deleted file mode 100644 index 73be5f17691..00000000000 --- a/master/buildbot/db/schema/v5.py +++ /dev/null @@ -1,69 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. 
-# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Copyright Buildbot Team Members - -from buildbot.db.schema import base - -class Upgrader(base.Upgrader): - def upgrade(self): - self.add_index("buildrequests", "buildsetid") - self.add_index("buildrequests", "buildername", 255) - self.add_index("buildrequests", "complete") - self.add_index("buildrequests", "claimed_at") - self.add_index("buildrequests", "claimed_by_name", 255) - - self.add_index("builds", "number") - self.add_index("builds", "brid") - - self.add_index("buildsets", "complete") - self.add_index("buildsets", "submitted_at") - - self.add_index("buildset_properties", "buildsetid") - - self.add_index("changes", "branch", 255) - self.add_index("changes", "revision", 255) - self.add_index("changes", "author", 255) - self.add_index("changes", "category", 255) - self.add_index("changes", "when_timestamp") - - self.add_index("change_files", "changeid") - self.add_index("change_links", "changeid") - self.add_index("change_properties", "changeid") - - # schedulers already has an index - - self.add_index("scheduler_changes", "schedulerid") - self.add_index("scheduler_changes", "changeid") - - self.add_index("scheduler_upstream_buildsets", "buildsetid") - self.add_index("scheduler_upstream_buildsets", "schedulerid") - self.add_index("scheduler_upstream_buildsets", "active") - - # sourcestamps are only queried by id, no need for additional indexes - - self.add_index("sourcestamp_changes", "sourcestampid") - - self.set_version() - - def add_index(self, table, column, length=None): - lengthstr="" - if length is not None and self.dbapiName == 'MySQLdb': - lengthstr = " (%i)" % length - q = "CREATE INDEX `%(table)s_%(column)s` ON `%(table)s` (`%(column)s`%(lengthstr)s)" - cursor = self.conn.cursor() - cursor.execute(q % {'table': table, 'column': column, 'lengthstr': lengthstr}) - - def set_version(self): - c = self.conn.cursor() - c.execute("""UPDATE version set version = 5 where version = 4""") diff --git a/master/buildbot/db/util.py b/master/buildbot/db/util.py index 754b678d4ba..94b9e096b2d 100644 --- a/master/buildbot/db/util.py +++ b/master/buildbot/db/util.py @@ -13,7 +13,7 @@ # # Copyright Buildbot Team Members -def sql_insert(dbapi, table, columns): +def sql_insert(table, columns): """ Make an SQL insert statement for the given table and columns, using the appropriate paramstyle for the dbi. Note that this only supports positional @@ -21,12 +21,6 @@ def sql_insert(dbapi, table, columns): a name-based paramstyle. """ - if dbapi.paramstyle == 'qmark': - params = ",".join(("?",)*len(columns)) - elif dbapi.paramstyle == 'numeric': - params = ",".join(":%d" % d for d in range(1, len(columns)+1)) - elif dbapi.paramstyle == 'format': - params = ",".join(("%s",)*len(columns)) - else: - raise RuntimeError("unsupported paramstyle %s" % dbapi.paramstyle) + # TODO: this assumes sqlite for now!! 
+ params = ",".join(("?",)*len(columns)) return "INSERT INTO %s (%s) VALUES (%s)" % (table, ", ".join(columns), params) diff --git a/master/buildbot/master.py b/master/buildbot/master.py index 8147c3dcb93..ce61f148fd2 100644 --- a/master/buildbot/master.py +++ b/master/buildbot/master.py @@ -39,9 +39,7 @@ from buildbot.process.properties import Properties from buildbot.config import BuilderConfig from buildbot.process.builder import BuilderControl -from buildbot.db.dbspec import DBSpec from buildbot.db import connector, exceptions -from buildbot.db.schema.manager import DBSchemaManager from buildbot.schedulers.manager import SchedulerManager from buildbot.util.loop import DelegateLoop @@ -612,7 +610,7 @@ class BuildMaster(service.MultiService): change_svc = None properties = Properties() - def __init__(self, basedir, configFileName="master.cfg", db_spec=None): + def __init__(self, basedir, configFileName="master.cfg"): service.MultiService.__init__(self) self.setName("buildmaster") self.basedir = basedir @@ -648,8 +646,6 @@ def __init__(self, basedir, configFileName="master.cfg", db_spec=None): self.db = None self.db_url = None self.db_poll_interval = _Unset - if db_spec: - self.loadDatabase(db_spec) # note that "read" here is taken in the past participal (i.e., "I read # the config already") rather than the imperative ("you should read the @@ -1052,13 +1048,19 @@ def _done(res): d.addErrback(log.err) return d - def loadDatabase(self, db_spec, db_poll_interval=None): + def loadDatabase(self, db_url, db_poll_interval=None): if self.db: return + self.db = connector.DBConnector(db_url, self.basedir) + if self.changeCacheSize: + self.db.setChangeCacheSize(self.changeCacheSize) + self.db.start() + # make sure it's up to date - sm = DBSchemaManager(db_spec, self.basedir) - if not sm.is_current(): + def check_current(res): + if res: + return # good to go! raise exceptions.DatabaseNotReadyError, textwrap.dedent(""" The Buildmaster database needs to be upgraded before this version of buildbot can run. Use the following command-line @@ -1066,46 +1068,47 @@ def loadDatabase(self, db_spec, db_poll_interval=None): to upgrade the database, and try starting the buildmaster again. You may want to make a backup of your buildmaster before doing so. If you are using MySQL, you must specify the connector string on the upgrade-master command line: - buildbot upgrade-master --db= path/to/master + buildbot upgrade-master --db= path/to/master """) - - self.db = connector.DBConnector(db_spec) - if self.changeCacheSize: - self.db.setChangeCacheSize(self.changeCacheSize) - self.db.start() - - self.botmaster.db = self.db - self.status.setDB(self.db) - - self.db.subscribe_to("add-buildrequest", - self.botmaster.trigger_add_buildrequest) - - sm = SchedulerManager(self, self.db, self.change_svc) - self.db.subscribe_to("add-change", sm.trigger_add_change) - self.db.subscribe_to("modify-buildset", sm.trigger_modify_buildset) - - self.scheduler_manager = sm - sm.setServiceParent(self) - - # Set db_poll_interval (perhaps to 30 seconds) if you are using - # multiple buildmasters that share a common database, such that the - # masters need to discover what each other is doing by polling the - # database. TODO: this will be replaced by the DBNotificationServer. 
- if db_poll_interval: - # it'd be nice if TimerService let us set now=False - t1 = TimerService(db_poll_interval, sm.trigger) - t1.setServiceParent(self) - t2 = TimerService(db_poll_interval, self.botmaster.loop.trigger) - t2.setServiceParent(self) - # adding schedulers (like when loadConfig happens) will trigger the - # scheduler loop at least once, which we need to jump-start things - # like Periodic. + d = self.db.model.is_current() + d.addCallback(check_current) + + # set up the stuff that depends on the db + def set_up_db_dependents(): + # TODO: this needs to go + self.botmaster.db = self.db + self.status.setDB(self.db) + + self.db.subscribe_to("add-buildrequest", + self.botmaster.trigger_add_buildrequest) + + sm = SchedulerManager(self, self.db, self.change_svc) + self.db.subscribe_to("add-change", sm.trigger_add_change) + self.db.subscribe_to("modify-buildset", sm.trigger_modify_buildset) + + self.scheduler_manager = sm + sm.setServiceParent(self) + + # Set db_poll_interval (perhaps to 30 seconds) if you are using + # multiple buildmasters that share a common database, such that the + # masters need to discover what each other is doing by polling the + # database. TODO: this will be replaced by the DBNotificationServer. + if db_poll_interval: + # it'd be nice if TimerService let us set now=False + t1 = TimerService(db_poll_interval, sm.trigger) + t1.setServiceParent(self) + t2 = TimerService(db_poll_interval, self.botmaster.loop.trigger) + t2.setServiceParent(self) + # adding schedulers (like when loadConfig happens) will trigger the + # scheduler loop at least once, which we need to jump-start things + # like Periodic. + d.addCallback(set_up_db_dependents) + return d def loadConfig_Database(self, db_url, db_poll_interval): self.db_url = db_url self.db_poll_interval = db_poll_interval - db_spec = DBSpec.from_url(db_url, self.basedir) - self.loadDatabase(db_spec, db_poll_interval) + return self.loadDatabase(db_url, db_poll_interval) def loadConfig_Slaves(self, new_slaves): return self.botmaster.loadConfig_Slaves(new_slaves) diff --git a/master/buildbot/scripts/runner.py b/master/buildbot/scripts/runner.py index d0ed26c1081..c1d5717bab8 100644 --- a/master/buildbot/scripts/runner.py +++ b/master/buildbot/scripts/runner.py @@ -296,13 +296,13 @@ def public_html(self, files): f.close() def create_db(self): - from buildbot.db import dbspec, exceptions - spec = dbspec.DBSpec.from_url(self.config["db"], self.basedir) + from buildbot.db import enginestrategy, exceptions + engine = enginestrategy.create_engine(self.config['db'], basedir=self.basedir) if not self.config['quiet']: print "creating database" # upgrade from "nothing" from buildbot.db.schema import manager - sm = manager.DBSchemaManager(spec, self.basedir) + sm = manager.DBSchemaManager(engine, self.basedir) if sm.get_db_version() != 0: raise exceptions.DBAlreadyExistsError sm.upgrade() @@ -410,7 +410,6 @@ def check_master_cfg(self): status information. 
The default (which creates an SQLite database in BASEDIR/state.sqlite) is equivalent to: - --db='DBSpec("sqlite3", basedir+"/state.sqlite"))' --db='sqlite:///state.sqlite' To use a remote MySQL database instead, use something like: @@ -472,13 +471,12 @@ def upgradeMaster(config): m.move_if_present(os.path.join(basedir, "public_html/index.html"), os.path.join(basedir, "templates/root.html")) - from buildbot.db import dbspec - spec = dbspec.DBSpec.from_url(config["db"], basedir) - # TODO: check that TAC file specifies the right spec + from buildbot.db import enginestrategy + engine = enginestrategy.create_engine(config['db'], basedir=basedir) # upgrade the db from buildbot.db.schema import manager - sm = manager.DBSchemaManager(spec, basedir) + sm = manager.DBSchemaManager(engine, basedir) sm.upgrade() # check the configuration diff --git a/master/buildbot/test/integration/README.txt b/master/buildbot/test/integration/README.txt new file mode 100644 index 00000000000..92435a72eb0 --- /dev/null +++ b/master/buildbot/test/integration/README.txt @@ -0,0 +1,5 @@ +"Integration" tests are tests that exercise a significant chunk of the +Buildbot code, and thus do not really count as unit tests. + +When debugging, get the unit tests working first, *then* work on the +integration tests. diff --git a/master/buildbot/test/integration/__init__.py b/master/buildbot/test/integration/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/master/buildbot/test/integration/citools-README.txt b/master/buildbot/test/integration/citools-README.txt new file mode 100644 index 00000000000..42f20a97a2e --- /dev/null +++ b/master/buildbot/test/integration/citools-README.txt @@ -0,0 +1,6 @@ +-- citools historical master tarball -- + +This tarball was donated by Lukas Linhart and represents a real-world set of +changes, builder pickles, build pickles, and logfiles. + +This was generated with a 0.7.11p1 master. diff --git a/master/buildbot/test/integration/citools.tgz b/master/buildbot/test/integration/citools.tgz new file mode 100644 index 00000000000..ddc845f77c0 Binary files /dev/null and b/master/buildbot/test/integration/citools.tgz differ diff --git a/master/buildbot/test/integration/master-0-7-5-README.txt b/master/buildbot/test/integration/master-0-7-5-README.txt new file mode 100644 index 00000000000..5fb359b9458 --- /dev/null +++ b/master/buildbot/test/integration/master-0-7-5-README.txt @@ -0,0 +1,22 @@ +-- 0.7.5 historical master tarball -- + +This tarball exists to allow testing upgrades of old versions. It was created +by running the master against the included master.cfg using a normal +buildbot.tac. The slave was connected, and a few changes sent, including one +with some funny characters in it: + +$ snowman=`python -c 'print u"\N{SNOWMAN}".encode("utf-8")'` +$ black_star=`python -c 'print u"\N{BLACK STAR}".encode("utf-8")'` +$ comet=`python -c 'print u"\N{COMET}".encode("utf-8")'` +$ buildbot sendchange --master=localhost:9989 -u \ + "the snowman <$snowman@norpole.net>" \ + --revision="${black_star}-devel" -b "$comet" \ + --comments "shooting star or $comet?" \ + "$black_star/funny_chars/in/a/path" "normal/path" +$ buildbot sendchange --master=localhost:9989 + -u "dustin " --revision="1234" + --comments "on-branch change" boring/path + +0.7.5 did not support change properties from sendchange. + +Note that the master.cfg also puts a funny character in stdout (in UTF-8). 
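For orientation before the upgrade tests that follow: a minimal sketch of
driving the new migration path against an extracted historical basedir like
this one. This is hypothetical driver code, not part of the patch; it assumes
a running Twisted reactor (e.g. under trial) and uses only the DBConnector and
model API introduced above:

    from buildbot.db import connector

    # assumes master-0-7-5.tgz has already been extracted into 'master-0-7-5'
    db = connector.DBConnector("sqlite:///state.sqlite", basedir="master-0-7-5")

    d = db.model.upgrade()                 # adopt the old schema and migrate it
    d.addCallback(lambda _: db.model.is_current())
    def report(current):
        print "schema up to date:", current   # expect True after the upgrade
    d.addCallback(report)

Note that, as in the tests below, the connector need not be start()ed to run
an upgrade; the model component only needs the connector's thread pool, which
is created in DBConnector.__init__.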
diff --git a/master/buildbot/test/integration/master-0-7-5.tgz b/master/buildbot/test/integration/master-0-7-5.tgz new file mode 100644 index 00000000000..4a9e01c565d Binary files /dev/null and b/master/buildbot/test/integration/master-0-7-5.tgz differ diff --git a/master/buildbot/test/integration/test_upgrade.py b/master/buildbot/test/integration/test_upgrade.py new file mode 100644 index 00000000000..adf6c9ea19d --- /dev/null +++ b/master/buildbot/test/integration/test_upgrade.py @@ -0,0 +1,215 @@ +# This file is part of Buildbot. Buildbot is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Buildbot Team Members + +import os +import shutil +import cPickle +import tarfile +from twisted.python import util +from twisted.internet import defer +from twisted.trial import unittest +import sqlalchemy as sa +import migrate.versioning.api +from buildbot.db import connector +from buildbot.test.util import db + +class Thing(object): + # simple object-with-attributes for use in faking pickled objects + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + +class UpgradeTestMixin(object): + """Supporting code to test upgrading from older versions by untarring a + basedir tarball and then checking that the results are as expected.""" + + # class variables to set in subclasses + + source_tarball = None # filename of the tarball (sibling to this file) + db_url = "sqlite:///state.sqlite" # db URL to use (usually default is OK) + + def setUpUpgradeTest(self): + self.basedir = None + + tarball = util.sibpath(__file__, self.source_tarball) + if not os.path.exists(tarball): + raise unittest.SkipTest( + "'%s' not found (normal when not building from Git)" % tarball) + + tf = tarfile.open(tarball) + prefixes = set() + for inf in tf: + tf.extract(inf) + prefixes.add(inf.name.split('/', 1)[0]) + # (note that tf.extractall isn't available in py2.4) + + # get the top-level dir from the tarball + assert len(prefixes) == 1, "tarball has multiple top-level dirs!" 
+ self.basedir = prefixes.pop() + + self.db = connector.DBConnector(self.db_url, self.basedir) + + def tearDownUpgradeTest(self): + if self.basedir: + pass #shutil.rmtree(self.basedir) + + # save subclasses the trouble of calling our setUp and tearDown methods + + def setUp(self): + self.setUpUpgradeTest() + + def tearDown(self): + self.tearDownUpgradeTest() + +class DBUtilsMixin(object): + """Utilities -- assertions and whatnot -- for classes below""" + + def assertModelMatches(self): + def comp(engine): + return migrate.versioning.api.compare_model_to_db( + engine, + self.db.model.repo_path, + self.db.model.metadata) + d = self.db.pool.do_with_engine(comp) + def check(diff): + if diff: + self.fail(str(diff)) + d.addCallback(check) + return d + + def fix_pickle_encoding(self, old_encoding): + """Do the equivalent of master/contrib/fix_pickle_encoding.py""" + changes_file = os.path.join(self.basedir, "changes.pck") + fp = open(changes_file) + changemgr = cPickle.load(fp) + fp.close() + changemgr.recode_changes(old_encoding, quiet=True) + cPickle.dump(changemgr, open(changes_file, "w")) + +class UpgradeTestEmpty(db.RealDatabaseMixin, DBUtilsMixin, unittest.TestCase): + + def setUp(self): + self.setUpRealDatabase() + + self.basedir = os.path.abspath("basedir") + if os.path.exists(self.basedir): + shutil.rmtree(self.basedir) + os.makedirs(self.basedir) + + self.db = connector.DBConnector(self.db_url, self.basedir) + + def tearDown(self): + if os.path.exists(self.basedir): + shutil.rmtree(self.basedir) + + self.tearDownRealDatabase() + + def test_emptydb_modelmatches(self): + d = self.db.model.upgrade() + d.addCallback(lambda r : self.assertModelMatches()) + return d + +class UpgradeTest075(UpgradeTestMixin, DBUtilsMixin, unittest.TestCase): + + # this tarball contains some unicode changes, encoded as utf8, so it + # needs fix_pickle_encoding invoked before we can get started + source_tarball = "master-0-7-5.tgz" + + def verify_thd(self, conn): + "verify the contents of the db - run in a thread" + # note that this will all change if we re-create the tarball! + model = self.db.model + + r = conn.execute( + sa.select([model.changes], order_by=model.changes.c.changeid)) + ch = r.fetchone() + self.failUnlessEqual(ch.changeid, 1) + self.failUnlessEqual(ch.author, u'the snowman <\N{SNOWMAN}@norpole.net>') + self.failUnlessEqual(ch.comments, u'shooting star or \N{COMET}?') + self.failUnlessEqual(ch.revision, u'\N{BLACK STAR}-devel') + self.failUnlessEqual(ch.branch, u'\N{COMET}') + ch = r.fetchone() + self.failUnlessEqual(ch.changeid, 2) + self.failUnlessEqual(ch.author, u"dustin ") + self.failUnlessEqual(ch.comments, u'on-branch change') + self.failUnlessEqual(ch.revision, u'1234') + self.failUnlessEqual(ch.branch, u'') # arguably a bug - should be None? 
+
+        r = conn.execute(
+            sa.select([model.change_files]))
+        # use a set to avoid depending on db collation
+        filenames = set([ row.filename for row in r ])
+        expected = set([
+            u'boring/path',
+            u'normal/path',
+            u'\N{BLACK STAR}/funny_chars/in/a/path',
+        ])
+        self.failUnlessEqual(filenames, expected)
+
+
+    def test_test(self):
+        d = defer.succeed(None)
+        d.addCallback(lambda _ : self.fix_pickle_encoding('utf8'))
+        d.addCallback(lambda _ : self.db.model.upgrade())
+        d.addCallback(lambda _ : self.assertModelMatches())
+        d.addCallback(lambda _ : self.db.pool.do(self.verify_thd))
+        return d
+
+class UpgradeTestCitools(UpgradeTestMixin, DBUtilsMixin, unittest.TestCase):
+
+    source_tarball = "citools.tgz"
+
+    def verify_thd(self, conn):
+        "partially verify the contents of the db - run in a thread"
+        model = self.db.model
+
+        # this is a big db, so we only spot-check things -- any serious
+        # breakage should already have surfaced during the import itself
+        r = conn.execute(
+            sa.select([model.changes],
+                whereclause=model.changes.c.changeid == 70))
+        ch = r.fetchone()
+        self.failUnlessEqual(ch.changeid, 70)
+        self.failUnlessEqual(ch.author, u'Jakub Vysoky ')
+        self.failUnlessEqual(ch.comments, u'some failing tests in check_downgrade and metapackage_version')
+        self.failUnlessEqual(ch.revision, u'2ce0c33b7e10cce98e8d9c5b734b8c133ee4d320')
+        self.failUnlessEqual(ch.branch, u'master')
+
+        r = conn.execute(
+            sa.select([model.change_files.c.filename],
+                whereclause=model.change_files.c.changeid == 70))
+        self.assertEqual(r.scalar(), 'tests/test_debian.py')
+
+        r = conn.execute(
+            sa.select([model.changes],
+                whereclause=model.changes.c.changeid == 77))
+        ch = r.fetchone()
+        self.failUnlessEqual(ch.changeid, 77)
+        self.failUnlessEqual(ch.author, u'BuildBot')
+        self.failUnlessEqual(ch.comments, u'Dependency changed, sending dummy commit')
+        self.failUnlessEqual(ch.revision, u'HEAD')
+        self.failUnlessEqual(ch.branch, u'master')
+
+        r = conn.execute(
+            sa.select([model.change_files.c.filename],
+                whereclause=model.change_files.c.changeid == 77))
+        self.assertEqual(r.scalar(), 'CHANGELOG')
+
+
+    def test_test(self):
+        d = defer.succeed(None)
+        d.addCallback(lambda _ : self.db.model.upgrade())
+        d.addCallback(lambda _ : self.assertModelMatches())
+        d.addCallback(lambda _ : self.db.pool.do(self.verify_thd))
+        return d
diff --git a/master/buildbot/test/regressions/test_change_properties.py b/master/buildbot/test/regressions/test_change_properties.py
index f3102dd4a0c..6c2a6690cdd 100644
--- a/master/buildbot/test/regressions/test_change_properties.py
+++ b/master/buildbot/test/regressions/test_change_properties.py
@@ -15,67 +15,53 @@
 import os
 import shutil
-
 from twisted.trial import unittest
-
+from twisted.internet import defer
 from buildbot.changes.changes import Change
-
 from buildbot import util
-
-from buildbot.db.schema import manager
-from buildbot.db.dbspec import DBSpec
 from buildbot.db.connector import DBConnector
+from buildbot.test.util import db
 
-class TestChangeProperties(unittest.TestCase):
+class TestChangeProperties(db.RealDatabaseMixin, unittest.TestCase):
     def setUp(self):
-        self.basedir = "ChangeProperties"
+        self.setUpRealDatabase()
+        self.basedir = os.path.abspath("basedir")
         if os.path.exists(self.basedir):
             shutil.rmtree(self.basedir)
         os.makedirs(self.basedir)
 
-        self.spec = DBSpec.from_url("sqlite:///state.sqlite", self.basedir)
-
-        self.sm = manager.DBSchemaManager(self.spec, self.basedir)
-        self.sm.upgrade(quiet=True)
-
-        self.db = DBConnector(self.spec)
-        self.db.start()
+        self.db = DBConnector(self.db_url, self.basedir)
+        d = self.db.model.upgrade()
+        d.addCallback(lambda _ : self.db.start())
+        return d
 
     def tearDown(self):
         self.db.stop()
-        shutil.rmtree(self.basedir)
+        self.tearDownRealDatabase()
 
-    def testDBGetChangeNumberedNow(self):
+    def testDBGetChangeByNumber(self):
         db = self.db
 
         c = Change(who="catlee", files=["foo"], comments="", branch="b1")
         c.properties.setProperty("foo", "bar", "property_source")
-        db.addChangeToDatabase(c)
-
-        c1 = db.getChangeNumberedNow(c.number)
-        self.assertEquals(c1.properties, c.properties)
 
-        # Flush the cache
-        db._change_cache = util.LRUCache()
+        d = defer.succeed(None)
 
-        c1 = db.getChangeNumberedNow(c.number)
-        self.assertEquals(c1.properties, c.properties)
+        # add the change to the db..
+        d.addCallback(lambda _ : db.addChangeToDatabase(c)) # TODO not async yet
 
-    def testDBGetChangeByNumber(self):
-        db = self.db
-
-        c = Change(who="catlee", files=["foo"], comments="", branch="b1")
-        c.properties.setProperty("foo", "bar", "property_source")
-        db.addChangeToDatabase(c)
-
-        d = db.getChangeByNumber(c.number)
+        # get it and check (probably from the cache)
+        d.addCallback(lambda _ : db.getChangeByNumber(c.number))
         def check(c1):
             self.assertEquals(c1.properties, c.properties)
         d.addCallback(check)
 
-        def flush(ign):
-            # Flush the cache
+        # flush the cache
+        def flush(_):
             db._change_cache = util.LRUCache()
-            return db.getChangeByNumber(c.number)
         d.addCallback(flush)
+
+        # and get the change again, this time using the db, and check it
+        d.addCallback(lambda _ : db.getChangeByNumber(c.number))
         d.addCallback(check)
diff --git a/master/buildbot/test/regressions/test_import_unicode_changes.py b/master/buildbot/test/regressions/test_import_unicode_changes.py
index 3a3fa8c7c0c..e0f2e9b5c20 100644
--- a/master/buildbot/test/regressions/test_import_unicode_changes.py
+++ b/master/buildbot/test/regressions/test_import_unicode_changes.py
@@ -13,127 +13,96 @@
 #
 # Copyright Buildbot Team Members
 
-import os
-import shutil
-import cPickle
-
 from twisted.trial import unittest
-
-from buildbot.changes.changes import Change, OldChangeMaster
-
-from buildbot.db.schema import manager
-from buildbot.db.dbspec import DBSpec
 from buildbot.db.connector import DBConnector
+from buildbot.test.util import change_import
 
-class TestUnicodeChanges(unittest.TestCase):
+# TODO: schema is not importing changes?
+class TestUnicodeChanges(change_import.ChangeImportMixin, unittest.TestCase):
     def setUp(self):
-        self.basedir = "UnicodeChanges"
-        if os.path.exists(self.basedir):
-            shutil.rmtree(self.basedir)
-        os.makedirs(self.basedir)
-
-        # Now try the upgrade process, which will import the old changes.
- self.spec = DBSpec.from_url("sqlite:///state.sqlite", self.basedir) - - self.db = DBConnector(self.spec) - self.db.start() + self.setUpChangeImport() + self.dbc = DBConnector(self.db_url, self.basedir) + # note the connector isn't started, as we're testing upgrades def tearDown(self): - if self.db: - self.db.stop() - - def mkchanges(self, changes): - import buildbot.changes.changes - cm = buildbot.changes.changes.OldChangeMaster() - cm.changes = changes - return cm + if self.dbc: + self.dbc.stop() + self.tearDownChangeImport() def testUnicodeChange(self): - # Create changes.pck - changes = [Change(who=u"Frosty the \N{SNOWMAN}".encode("utf8"), - files=["foo"], comments=u"Frosty the \N{SNOWMAN}".encode("utf8"), - branch="b1", revision=12345)] - cPickle.dump(self.mkchanges(changes), open(os.path.join(self.basedir, - "changes.pck"), "wb")) - - sm = manager.DBSchemaManager(self.spec, self.basedir) - sm.upgrade(quiet=True) - - c = self.db.getChangeNumberedNow(1) - - self.assertEquals(c.who, u"Frosty the \N{SNOWMAN}") - self.assertEquals(c.comments, u"Frosty the \N{SNOWMAN}") + self.make_pickle( + self.make_change( + who=u"Frosty the \N{SNOWMAN}".encode("utf8"), + files=["foo"], + comments=u"Frosty the \N{SNOWMAN}".encode("utf8"), + branch="b1", + revision=12345)) + + d = self.dbc.model.upgrade() + d.addCallback(lambda _ : self.dbc.start()) + d.addCallback(lambda _ : self.dbc.getChangeByNumber(1)) + def check(c): + self.failIf(c is None) + self.assertEquals(c.who, u"Frosty the \N{SNOWMAN}") + self.assertEquals(c.comments, u"Frosty the \N{SNOWMAN}") + d.addCallback(check) + return d def testNonUnicodeChange(self): - # Create changes.pck - changes = [Change(who="\xff\xff\x00", files=["foo"], - comments="\xff\xff\x00", branch="b1", revision=12345)] - cPickle.dump(self.mkchanges(changes), open(os.path.join(self.basedir, - "changes.pck"), "wb")) - - sm = manager.DBSchemaManager(self.spec, self.basedir) - self.assertRaises(UnicodeError, lambda : sm.upgrade(quiet=True)) + self.make_pickle( + self.make_change( + who="\xff\xff\x00", + files=["foo"], + comments="\xff\xff\x00", + branch="b1", + revision=12345)) + + d = self.dbc.model.upgrade() + def eb(f): + self.failUnless("UnicodeError" in str(f)) + def cb(r): + self.fail("upgrade did not fail for non-unicode changes") + d.addCallbacks(cb, eb) + return d def testAsciiChange(self): - # Create changes.pck - changes = [Change(who="Frosty the Snowman", - files=["foo"], comments="Frosty the Snowman", branch="b1", revision=12345)] - cPickle.dump(self.mkchanges(changes), open(os.path.join(self.basedir, - "changes.pck"), "wb")) - - sm = manager.DBSchemaManager(self.spec, self.basedir) - sm.upgrade(quiet=True) - - c = self.db.getChangeNumberedNow(1) - - self.assertEquals(c.who, "Frosty the Snowman") - self.assertEquals(c.comments, "Frosty the Snowman") + self.make_pickle( + self.make_change( + who="Frosty the Snowman", + files=["foo"], + comments="Frosty the Snowman", + branch="b1", + revision=12345)) + + d = self.dbc.model.upgrade() + d.addCallback(lambda _ : self.dbc.start()) + d.addCallback(lambda _ : self.dbc.getChangeByNumber(1)) + def check(c): + self.failIf(c is None) + self.assertEquals(c.who, "Frosty the Snowman") + self.assertEquals(c.comments, "Frosty the Snowman") + d.addCallback(check) + return d def testUTF16Change(self): - # Create changes.pck - cm = OldChangeMaster() - cm.changes = [Change(who=u"Frosty the \N{SNOWMAN}".encode("utf16"), - files=["foo"], comments=u"Frosty the \N{SNOWMAN}".encode("utf16"), - branch="b1", revision=12345)] - - # 
instead of running contrib/fix_changes_pickle_encoding.py, we just call - # the changemanager's recode_changes directly - it's the function at the - # heart of the script anyway. - cm.recode_changes('utf16', quiet=True) - - # and dump the recoded changemanager to changes.pck before trying a schema upgrade - cPickle.dump(cm, open(os.path.join(self.basedir, "changes.pck"), "wb")) - - sm = manager.DBSchemaManager(self.spec, self.basedir) - sm.upgrade(quiet=True) - - c = self.db.getChangeNumberedNow(1) - - self.assertEquals(c.who, u"Frosty the \N{SNOWMAN}") - self.assertEquals(c.comments, u"Frosty the \N{SNOWMAN}") - -class TestMySQLDBUnicodeChanges(TestUnicodeChanges): - def setUp(self): - self.basedir = "MySQLDBUnicodeChanges" - if os.path.exists(self.basedir): - shutil.rmtree(self.basedir) - os.makedirs(self.basedir) - - # Now try the upgrade process, which will import the old changes. - self.spec = DBSpec.from_url( - "mysql://buildbot_test:buildbot_test@localhost/buildbot_test", self.basedir) - - self.db = DBConnector(self.spec) - self.db.start() - - result = self.db.runQueryNow("SHOW TABLES") - for row in result: - self.db.runQueryNow("DROP TABLE %s" % row[0]) - self.db.runQueryNow("COMMIT") - -try: - import MySQLdb - conn = MySQLdb.connect(user="buildbot_test", db="buildbot_test", - passwd="buildbot_test", use_unicode=True, charset='utf8') -except: - TestMySQLDBUnicodeChanges.skip = "MySQLdb not installed" + self.make_pickle( + self.make_change( + who=u"Frosty the \N{SNOWMAN}".encode("utf16"), + files=[u"foo".encode('utf16')], + comments=u"Frosty the \N{SNOWMAN}".encode("utf16"), + branch="b1", + revision=12345), + # instead of running contrib/fix_changes_pickle_encoding.py, we + # just call the changemanager's recode_changes directly - it's + # the function at the heart of the script anyway. + recode_fn=lambda cm : cm.recode_changes('utf16', quiet=True)) + + d = self.dbc.model.upgrade() + d.addCallback(lambda _ : self.dbc.start()) + d.addCallback(lambda _ : self.dbc.getChangeByNumber(1)) + def check(c): + self.failIf(c is None) + self.assertEquals(c.who, u"Frosty the \N{SNOWMAN}") + self.assertEquals(c.comments, u"Frosty the \N{SNOWMAN}") + d.addCallback(check) + return d diff --git a/master/buildbot/test/regressions/test_import_weird_changes.py b/master/buildbot/test/regressions/test_import_weird_changes.py index c543bf7c331..2509b91db81 100644 --- a/master/buildbot/test/regressions/test_import_weird_changes.py +++ b/master/buildbot/test/regressions/test_import_weird_changes.py @@ -13,54 +13,37 @@ # # Copyright Buildbot Team Members -import os -import shutil -import cPickle - from twisted.trial import unittest - -from buildbot.changes.changes import Change - -from buildbot.db.schema import manager -from buildbot.db.dbspec import DBSpec from buildbot.db.connector import DBConnector +from buildbot.test.util import change_import -class TestWeirdChanges(unittest.TestCase): +class TestWeirdChanges(change_import.ChangeImportMixin, unittest.TestCase): def setUp(self): - self.basedir = "WeirdChanges" - if os.path.exists(self.basedir): - shutil.rmtree(self.basedir) - os.makedirs(self.basedir) - - # Now try the upgrade process, which will import the old changes. 
- self.spec = DBSpec.from_url("sqlite:///state.sqlite", self.basedir) - - self.db = DBConnector(self.spec) - self.db.start() + self.setUpChangeImport() + self.dbc = DBConnector(self.db_url, self.basedir) + # note the connector isn't started, as we're testing upgrades def tearDown(self): - if self.db: - self.db.stop() - if os.path.exists(self.basedir): - shutil.rmtree(self.basedir) - - def mkchanges(self, changes): - import buildbot.changes.changes - cm = buildbot.changes.changes.OldChangeMaster() - cm.changes = changes - return cm + if self.dbc: + self.dbc.stop() + self.tearDownChangeImport() def testListsAsFilenames(self): - # Create changes.pck - changes = [Change(who=u"Frosty the \N{SNOWMAN}".encode("utf8"), - files=[["foo","bar"],['bing']], comments=u"Frosty the \N{SNOWMAN}".encode("utf8"), - branch="b1", revision=12345)] - cPickle.dump(self.mkchanges(changes), open(os.path.join(self.basedir, - "changes.pck"), "wb")) - - sm = manager.DBSchemaManager(self.spec, self.basedir) - sm.upgrade(quiet=True) - - c = self.db.getChangeNumberedNow(1) - - self.assertEquals(sorted(c.files), sorted([u"foo", u"bar", u"bing"])) + # sometimes the 'filenames' in a Change object are actually lists of files. I don't + # know how this happens, but we should be resilient to it. + self.make_pickle( + self.make_change( + who=u"Frosty the \N{SNOWMAN}".encode("utf8"), + files=[["foo","bar"],['bing']], + comments=u"Frosty the \N{SNOWMAN}".encode("utf8"), + branch="b1", + revision=12345)) + + d = self.dbc.model.upgrade() + d.addCallback(lambda _ : self.dbc.start()) + d.addCallback(lambda _ : self.dbc.getChangeByNumber(1)) + def check(c): + self.failIf(c is None) + self.assertEquals(sorted(c.files), sorted([u"foo", u"bar", u"bing"])) + d.addCallback(check) + return d diff --git a/master/buildbot/test/unit/test_db_connector.py b/master/buildbot/test/unit/test_db_connector.py index 9cc03b79d2b..8e2742df49c 100644 --- a/master/buildbot/test/unit/test_db_connector.py +++ b/master/buildbot/test/unit/test_db_connector.py @@ -13,45 +13,24 @@ # # Copyright Buildbot Team Members +import os from twisted.trial import unittest +from buildbot.db import connector +from buildbot.test.util import db -from buildbot.db import dbspec, connector -from buildbot.test.util import threads - -class DBConnector_Basic(threads.ThreadLeakMixin, unittest.TestCase): +class DBConnector_Basic(db.RealDatabaseMixin, unittest.TestCase): """ Basic tests of the DBConnector class - all start with an empty DB """ def setUp(self): - self.setUpThreadLeak() - # use an in-memory sqlite database to test - self.dbc = connector.DBConnector(dbspec.DBSpec.from_url("sqlite://")) + self.setUpRealDatabase() + self.dbc = connector.DBConnector(self.db_url, os.path.abspath('basedir')) self.dbc.start() def tearDown(self): self.dbc.stop() - self.tearDownThreadLeak() - - def test_quoteq_format(self): - self.dbc.paramstyle = "format" # override default - self.assertEqual( - self.dbc.quoteq("SELECT * from developers where name='?'"), - "SELECT * from developers where name='%s'") - - def test_quoteq_qmark(self): - assert self.dbc.paramstyle == "qmark" # default for sqlite - self.assertEqual( - self.dbc.quoteq("SELECT * from developers where name='?'"), - "SELECT * from developers where name='?'") - - def test_paramlist_single(self): - self.dbc.paramstyle = "format" # override default - self.assertEqual(self.dbc.parmlist(1), "(%s)") - - def test_paramlist_multiple(self): - self.dbc.paramstyle = "format" # override default - self.assertEqual(self.dbc.parmlist(3), "(%s,%s,%s)") 
+ self.tearDownRealDatabase() def test_runQueryNow_simple(self): self.assertEqual(self.dbc.runQueryNow("SELECT 1"), diff --git a/master/buildbot/test/unit/test_db_dbspec.py b/master/buildbot/test/unit/test_db_dbspec.py deleted file mode 100644 index e5d426df3ba..00000000000 --- a/master/buildbot/test/unit/test_db_dbspec.py +++ /dev/null @@ -1,176 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Copyright Buildbot Team Members - -import os - -from twisted.trial import unittest - -from buildbot.db import dbspec -from buildbot.test.util import threads - -class DBSpec(unittest.TestCase): - # a dburl of "sqlite:///.." can use either the third-party sqlite3 - # module, or the stdlib pysqlite2.dbapi2 module, depending upon the - # version of python in use - SQLITE_NAMES = ["sqlite3", "pysqlite2.dbapi2"] - - def failUnlessConnection(self, spec, dbapiName, connargs=None, connkw=None): - errs = [] - if dbapiName is self.SQLITE_NAMES: - if spec.dbapiName not in self.SQLITE_NAMES: - errs.append("unexpected dbapiName %s" % spec.dbapiName) - else: - if spec.dbapiName != dbapiName: - errs.append("unexpected dbapiName %s" % spec.dbapiName) - if connargs is not None: - if spec.connargs != connargs: - errs.append("unexpected connargs: %s, expected %s" % (spec.connargs, connargs)) - if connkw is not None: - if spec.connkw != connkw: - errs.append("unexpected connkw: %s, expected %s" % (spec.connkw, connkw)) - if errs: - raise unittest.FailTest("; ".join(errs)) - - def test_fromURL_sqliteRelative(self): - basedir = "/foo/bar" - d = dbspec.DBSpec.from_url("sqlite:///state.sqlite", basedir=basedir) - self.failUnlessConnection(d, self.SQLITE_NAMES, - connargs=(os.path.join(basedir, "state.sqlite"),)) - - def test_fromURL_sqlitePercentSub(self): - basedir = "/foo/bar" - d = dbspec.DBSpec.from_url("sqlite:///%(basedir)s/x/state.sqlite", basedir=basedir) - # no os.path.join here - it's string interpolation - self.failUnlessConnection(d, self.SQLITE_NAMES, - connargs=("%s/x/state.sqlite" % basedir,)) - - def test_fromURL_sqliteAbsolutePath(self): - basedir = "/foo/bar" - d = dbspec.DBSpec.from_url("sqlite:////tmp/state.sqlite", basedir=basedir) - self.failUnlessConnection(d, self.SQLITE_NAMES, - connargs=("/tmp/state.sqlite",)) - - def test_fromURL_sqliteAbsolutePathNoBasedir(self): - d = dbspec.DBSpec.from_url("sqlite:////tmp/state.sqlite") - self.failUnlessConnection(d, self.SQLITE_NAMES, - connargs=("/tmp/state.sqlite",)) - - def test_fromURL_sqliteMemory(self): - d = dbspec.DBSpec.from_url("sqlite://") - self.failUnlessConnection(d, self.SQLITE_NAMES, - connargs=(":memory:",)) - - def test_fromURL_sqliteArgs(self): - d = dbspec.DBSpec.from_url("sqlite:////tmp/state.sqlite?foo=bar") - self.failUnlessConnection(d, self.SQLITE_NAMES, - connargs=("/tmp/state.sqlite",), - connkw={'foo' : 'bar'}) - - def test_fromURL_noDriver(self): - self.failUnlessRaises(ValueError, 
dbspec.DBSpec.from_url, "state.sqlite") - - def test_fromURL_noColon(self): - self.failUnlessRaises(ValueError, dbspec.DBSpec.from_url, "sqlite/state.sqlite") - - def test_fromURL_noSlash(self): - self.failUnlessRaises(ValueError, dbspec.DBSpec.from_url, "sqlite:state.sqlite") - - def test_fromURL_singleSlash(self): - self.failUnlessRaises(ValueError, dbspec.DBSpec.from_url, "sqlite:/state.sqlite") - - def test_fromURL_unknownDriver(self): - self.failUnlessRaises(ValueError, dbspec.DBSpec.from_url, "unknowndb://foo/bar") - - def test_fromURL_mysqlLocal2Slashes(self): - self.failUnlessRaises(ValueError, dbspec.DBSpec.from_url, "mysql://foo") - - def test_fromURL_mysqlAlphaPort(self): - self.failUnlessRaises(ValueError, dbspec.DBSpec.from_url, "mysql://somehost.com:badport/db") - - def test_fromURL_mysql(self): - basedir = "/foo/bar" - d = dbspec.DBSpec.from_url("mysql://somehost.com/dbname", basedir=basedir) - self.failUnlessConnection(d, 'MySQLdb', - connkw=dict(host='somehost.com', db='dbname', use_unicode=True, charset='utf8')) - - def test_fromURL_mysqlNoBasedir(self): - d = dbspec.DBSpec.from_url("mysql://somehost.com/dbname") - self.failUnlessConnection(d, 'MySQLdb', - connkw=dict(host='somehost.com', db='dbname', use_unicode=True, charset='utf8')) - - def test_fromURL_mysqlPort(self): - d = dbspec.DBSpec.from_url("mysql://somehost.com:9000/dbname") - self.failUnlessConnection(d, 'MySQLdb', - connkw=dict(host='somehost.com', db='dbname', port=9000, use_unicode=True, charset='utf8')) - - def test_fromURL_mysqlLocal(self): - d = dbspec.DBSpec.from_url("mysql:///database_name") - self.failUnlessConnection(d, 'MySQLdb', - connkw=dict(host=None, db='database_name', use_unicode=True, charset='utf8')) - - def test_fromURL_mysqlAuth(self): - d = dbspec.DBSpec.from_url("mysql://user:pass@somehost.com/dbname") - self.failUnlessConnection(d, 'MySQLdb', - connkw=dict(host='somehost.com', db='dbname', user="user", passwd="pass", use_unicode=True, charset='utf8')) - - def test_fromURL_mysqlAuthNoPass(self): - d = dbspec.DBSpec.from_url("mysql://user@somehost.com/dbname") - self.failUnlessConnection(d, 'MySQLdb', - connkw=dict(host='somehost.com', db='dbname', user="user", use_unicode=True, charset='utf8')) - - def test_fromURL_mysqlAuthNoPassPort(self): - d = dbspec.DBSpec.from_url("mysql://user@somehost.com:8000/dbname") - self.failUnlessConnection(d, 'MySQLdb', - connkw=dict(host='somehost.com', db='dbname', user="user", port=8000, use_unicode=True, charset='utf8')) - - def test_fromURL_mysqlAuthNoPassPortArgs(self): - d = dbspec.DBSpec.from_url("mysql://user@somehost.com:8000/dbname?foo=moo") - self.failUnlessConnection(d, 'MySQLdb', - connkw=dict(host='somehost.com', db='dbname', user="user", - port=8000, foo="moo", use_unicode=True, charset='utf8')) - -class DBSpec_methods(threads.ThreadLeakMixin, unittest.TestCase): - - def setUp(self): - self.setUpThreadLeak() - self.spec = dbspec.DBSpec.from_url("sqlite://") - self.pools = [] - - def tearDown(self): - # be careful to stop all pools - for pool in self.pools: - pool.close() - self.tearDownThreadLeak() - - # track a pool that must be closed - def trackPool(self, pool): - self.pools.append(pool) - return pool - - # note that sync connections need not be cleaned up - - ## tests - - def test_get_dbapi_has_connect(self): - self.assertTrue(hasattr(self.spec.get_dbapi(), 'connect')) - - def test_get_sync_connection_has_cursor(self): - self.assertTrue(hasattr(self.spec.get_sync_connection(), 'cursor')) - - def 
test_get_async_connection_pool_has_runInteraction(self):
-        pool = self.spec.get_async_connection_pool()
-        self.trackPool(pool)
-        pool.start()
-        self.assertTrue(hasattr(pool, 'runInteraction'))
diff --git a/master/buildbot/test/unit/test_db_model.py b/master/buildbot/test/unit/test_db_model.py
new file mode 100644
index 00000000000..896130cf8f9
--- /dev/null
+++ b/master/buildbot/test/unit/test_db_model.py
@@ -0,0 +1,55 @@
+# This file is part of Buildbot.  Buildbot is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Buildbot Team Members
+
+import os
+import mock
+from twisted.trial import unittest
+from twisted.internet import defer
+from buildbot.db import model, enginestrategy
+from buildbot.test.util import db

+class Model_Basic(db.RealDatabaseMixin, unittest.TestCase):
+    """
+    Basic tests of the Model class - all start with an empty DB
+    """
+
+    def setUp(self):
+        self.setUpRealDatabase()
+        engine = enginestrategy.create_engine(self.db_url,
+                                basedir=os.path.abspath('basedir'))
+
+        # mock out the pool, and set up the model
+        self.db = mock.Mock()
+        self.db.pool.do_with_engine = lambda thd : defer.maybeDeferred(thd, engine)
+        self.db.model = model.Model(self.db)
+        self.db.start()
+
+    def tearDown(self):
+        self.db.stop()
+        self.tearDownRealDatabase()
+
+    def test_is_current_empty(self):
+        d = self.db.model.is_current()
+        d.addCallback(lambda r : self.assertFalse(r))
+        return d
+
+    def test_is_current_full(self):
+        d = self.db.model.upgrade()
+        d.addCallback(lambda _ : self.db.model.is_current())
+        d.addCallback(lambda r : self.assertTrue(r))
+        return d
+
+    # the upgrade method is very well-tested by the integration tests; the
+    # remainder of the object is just tables.
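
A note on the setUp above: mocking out everything except the pool's
do_with_engine method is a handy way to exercise any DBConnectorComponent
against a real engine without a running DBConnector.  A minimal sketch of the
pattern, assuming only SQLAlchemy, mock, and Twisted are installed (the
model.Model usage in the final comment is illustrative, not part of this
patch):

    import sqlalchemy
    import mock
    from twisted.internet import defer

    # a throwaway in-memory engine, just as in the test above
    engine = sqlalchemy.create_engine('sqlite://')

    # fake the DBConnector: the only contract a component relies on here is
    # that pool.do_with_engine(callable) returns a Deferred
    db = mock.Mock()
    db.pool.do_with_engine = lambda thd: defer.maybeDeferred(thd, engine)

    # a component built on this fake runs its queries against the in-memory
    # engine, e.g.:
    #   db.model = model.Model(db)
    #   d = db.model.is_current()
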
diff --git a/master/buildbot/test/unit/test_db_pool.py b/master/buildbot/test/unit/test_db_pool.py index 5efd6741320..d98ca290527 100644 --- a/master/buildbot/test/unit/test_db_pool.py +++ b/master/buildbot/test/unit/test_db_pool.py @@ -1,5 +1,6 @@ import sqlalchemy from twisted.trial import unittest +from twisted.internet import defer from buildbot.db import pool class DBThreadPool(unittest.TestCase): @@ -7,7 +8,7 @@ def setUp(self): self.engine = sqlalchemy.create_engine('sqlite://') self.pool = pool.DBThreadPool(self.engine) - def test_simple(self): + def test_do(self): def add(conn, addend1, addend2): rp = conn.execute("SELECT %d + %d" % (addend1, addend2)) return rp.scalar() @@ -17,7 +18,7 @@ def check(res): d.addCallback(check) return d - def test_error(self): + def test_do_error(self): def fail(conn): rp = conn.execute("EAT COOKIES") return rp.scalar() @@ -29,7 +30,7 @@ def cb(r): d.addCallbacks(cb, eb) return d - def test_exception(self): + def test_do_exception(self): def raise_something(conn): raise RuntimeError("oh noes") d = self.pool.do(raise_something) @@ -40,3 +41,35 @@ def cb(r): self.fail("no exception propagated") d.addCallbacks(cb, eb) return d + + def test_do_with_engine(self): + def add(engine, addend1, addend2): + rp = engine.execute("SELECT %d + %d" % (addend1, addend2)) + return rp.scalar() + d = self.pool.do_with_engine(add, 10, 11) + def check(res): + self.assertEqual(res, 21) + d.addCallback(check) + return d + + def test_do_with_engine_exception(self): + def fail(engine): + rp = engine.execute("EAT COOKIES") + return rp.scalar() + d = self.pool.do_with_engine(fail) + def eb(f): + pass + def cb(r): + self.fail("no exception propagated") + d.addCallbacks(cb, eb) + return d + + def test_persistence_across_invocations(self): + d = defer.succeed(None) + def create_table(engine): + engine.execute("CREATE TABLE tmp ( a integer )") + d.addCallback( lambda r : self.pool.do_with_engine(create_table)) + def insert_into_table(engine): + engine.execute("INSERT INTO tmp values ( 1 )") + d.addCallback( lambda r : self.pool.do_with_engine(insert_into_table)) + return d diff --git a/master/buildbot/test/unit/test_db_schema_master.py b/master/buildbot/test/unit/test_db_schema_master.py deleted file mode 100644 index 98837a3bc29..00000000000 --- a/master/buildbot/test/unit/test_db_schema_master.py +++ /dev/null @@ -1,248 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# Copyright Buildbot Team Members - -import os -import shutil -import cPickle -import pprint - -from twisted.trial import unittest - -from buildbot.db.schema import manager -from buildbot.db import dbspec - -class Thing(object): - # simple object-with-attributes for use in faking pickled objects - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - -class DBSchemaManager(unittest.TestCase): - - def setUp(self): - self.basedir = "DBSchemaManager" - if os.path.exists(self.basedir): - shutil.rmtree(self.basedir) - os.makedirs(self.basedir) - - self.spec = dbspec.DBSpec.from_url("sqlite:///state.sqlite", self.basedir) - - self.sm = manager.DBSchemaManager(self.spec, self.basedir) - - ## assertions and utils - - def assertDatabaseOKEmpty(self): - """ - assert that the database is an upgrade of an empty db - """ - errs = [] - c = self.spec.get_sync_connection().cursor() - - # check the version - c.execute("SELECT * FROM version") - if c.fetchall()[0][0] != self.sm.get_current_version(): - errs.append("VERSION is not up to date") - - # check that the remaining tables are empty - for empty_tbl in ('changes', 'change_links', 'change_files', - 'change_properties', 'schedulers', 'scheduler_changes', - 'scheduler_upstream_buildsets', 'sourcestamps', 'patches', - 'sourcestamp_changes', 'buildsets', 'buildset_properties', - 'buildrequests', 'builds'): - c.execute("SELECT * FROM %s" % empty_tbl) - if len(c.fetchall()) != 0: - errs.append("table '%s' is not empty" % empty_tbl) - - if errs: - self.fail("; ".join(errs)) - - # populate the basedir with data to test the upgrade process; this should be - # expanded as more and more data is migrated from the basedir to the database - def fill_basedir(self): - self.fill_basedir_changes() - - def fill_basedir_changes(self): - changes = [ - Thing(number=1, who='dustin', comments='hi, mom', isdir=1, - branch=None, revision=1233, revlink='http://buildbot.net', - when=1267419122, category=None, links=[], files=[], - properties=Thing(properties={})), - Thing(number=2, who='warner', comments='', isdir=0, - branch='schedulerdb', revision=1234, revlink='http://pypi.com', - when=1267419123, category='new', links=[], files=[], - properties=Thing(properties={})), - # a change with number=None should cause all changes to be renumbered - Thing(number=None, who='catlee', comments=None, isdir=0, - branch=None, revision=1235, revlink=None, - when=1267419132, category=None, links=[], files=[], - properties=Thing(properties={'name' : 'jimmy'})), - # a change with revision=None should be ignored - Thing(number=6, who='bhearsum', comments='', isdir=0, - branch='fixes', revision=None, revlink=None, - when=1267419134, category='nice', links=[], files=[], - properties=Thing(properties={})), - Thing(number=7, who='marcusl', comments='', isdir=0, - branch='jinja', revision=1239, revlink=None, - when=1267419134, category='cool', links=['http://github.com'], - files=['main.c', 'util.c', 'ext.c'], - properties=Thing(properties={ - 'failures' : 3, - 'tests' : [ 'bogus1', 'bogus2', 'bogus3' ]})), - ] - - # embed it in a Changes object and pickle it up - changesource = Thing(changes=changes) - f = open(os.path.join(self.basedir, "changes.pck"), "wb") - f.write(cPickle.dumps(changesource)) - - def assertDatabaseOKFull(self): - """ - assert that the database is an upgrade of the db created by fill_basedir - """ - errs = [] - c = self.spec.get_sync_connection().cursor() - - # check the version - c.execute("SELECT * FROM version") - if c.fetchall()[0][0] != 
self.sm.get_current_version(): - errs.append("VERSION is not up to date") - - # do a byte-for-byte comparison of the changes table and friends - c.execute("""SELECT changeid, author, comments, is_dir, branch, revision, - revlink, when_timestamp, category, repository, project - FROM changes order by revision""") - res = list(c.fetchall()) - if res != [ - (1, u'dustin', 'hi, mom', 1, u'', u'1233', - u'http://buildbot.net', 1267419122, u'', u'', u''), - (2, u'warner', u'', 0, u'schedulerdb', u'1234', - u'http://pypi.com', 1267419123, u'new', u'', u''), - (3, u'catlee', u'', 0, u'', u'1235', - u'', 1267419132, u'', u'', u''), - # note change by bhearsum is missing because its revision=None - (4, u'marcusl', u'', 0, u'jinja', u'1239', - u'', 1267419134, u'cool', u'', u''), - ]: - pprint.pprint(res) - errs.append("changes table does not match expectations") - - c.execute("""SELECT changeid, link from change_links order by changeid""") - res = list(c.fetchall()) - if res != [ - (4, u'http://github.com'), - ]: - pprint.pprint(res) - errs.append("change_links table does not match expectations") - - c.execute("""SELECT changeid, filename from change_files order by changeid""") - res = list(c.fetchall()) - if res != [ - (4, u'main.c'), - (4, u'util.c'), - (4, u'ext.c'), - ]: - pprint.pprint(res) - errs.append("change_files table does not match expectations") - - c.execute("""SELECT changeid, property_name, property_value - from change_properties order by changeid, property_name""") - res = list(c.fetchall()) - if res != [ - (3, u'name', u'"jimmy"'), - (4, u'failures', u'3'), - (4, u'tests', u'["bogus1", "bogus2", "bogus3"]'), - ]: - pprint.pprint(res) - errs.append("change_properties table does not match expectations") - - # check that the remaining tables are empty - for empty_tbl in ('schedulers', 'scheduler_changes', - 'scheduler_upstream_buildsets', 'sourcestamps', 'patches', - 'sourcestamp_changes', 'buildsets', 'buildset_properties', - 'buildrequests', 'builds'): - c.execute("SELECT * FROM %s" % empty_tbl) - if len(c.fetchall()) != 0: - errs.append("table '%s' is not empty" % empty_tbl) - - if errs: - self.fail("; ".join(errs)) - - - ## tests - - def test_get_current_version(self): - # this is as much a reminder to write tests for the new version - # as a test of the (very trivial) method - self.assertEqual(self.sm.get_current_version(), 6) - - def test_get_db_version_empty(self): - self.assertEqual(self.sm.get_db_version(), 0) - - def test_get_db_version_int(self): - conn = self.spec.get_sync_connection() - c = conn.cursor() - c.execute("CREATE TABLE version (`version` integer)") - c.execute("INSERT INTO version values (17)") - self.assertEqual(self.sm.get_db_version(conn), 17) - - def test_is_current_empty(self): - self.assertFalse(self.sm.is_current()) - - def test_is_current_empty_upgrade(self): - self.sm.upgrade(quiet=True) - self.assertTrue(self.sm.is_current()) - - def test_upgrade_empty(self): - self.sm.upgrade(quiet=True) - self.assertDatabaseOKEmpty() - - def test_upgrade_full(self): - self.fill_basedir() - self.sm.upgrade(quiet=True) - self.assertDatabaseOKFull() - - def test_scheduler_name_uniqueness(self): - self.sm.upgrade(quiet=True) - c = self.spec.get_sync_connection().cursor() - c.execute("""INSERT INTO schedulers (`name`, `class_name`, `state`) - VALUES ('s1', 'Nightly', '')""") - c.execute("""INSERT INTO schedulers (`name`, `class_name`, `state`) - VALUES ('s1', 'Periodic', '')""") - self.assertRaises(Exception, c.execute, - """INSERT INTO schedulers (`name`, `class_name`, 
`state`) - VALUES ('s1', 'Nightly', '')""") - -class MySQLDBSchemaManager(DBSchemaManager): - def setUp(self): - self.basedir = "MySQLDBSchemaManager" - if os.path.exists(self.basedir): - shutil.rmtree(self.basedir) - os.makedirs(self.basedir) - - self.spec = dbspec.DBSpec.from_url("mysql://buildbot_test:buildbot_test@localhost/buildbot_test") - - # Drop all previous tables - cur = self.spec.get_sync_connection().cursor() - cur.execute("SHOW TABLES") - for row in cur.fetchall(): - cur.execute("DROP TABLE %s" % row[0]) - cur.execute("COMMIT") - - self.sm = manager.DBSchemaManager(self.spec, self.basedir) - -try: - import MySQLdb - conn = MySQLdb.connect(user="buildbot_test", db="buildbot_test", passwd="buildbot_test", use_unicode=True, charset='utf8') -except: - MySQLDBSchemaManager.skip = "MySQLdb not installed" diff --git a/master/buildbot/test/unit/test_db_util.py b/master/buildbot/test/unit/test_db_util.py deleted file mode 100644 index 8b4f24fa146..00000000000 --- a/master/buildbot/test/unit/test_db_util.py +++ /dev/null @@ -1,56 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Copyright Buildbot Team Members - - -from twisted.trial import unittest - -from buildbot.db import util - -class FakeDBAPI(object): - def __init__(self, paramstyle): - self.paramstyle = paramstyle - -class SQLUtils(unittest.TestCase): - - def test_sql_insert_single_qmark(self): - dbapi = FakeDBAPI('qmark') - self.assertEqual(util.sql_insert(dbapi, 'colors', ('color',)), - "INSERT INTO colors (color) VALUES (?)") - - def test_sql_insert_multi_qmark(self): - dbapi = FakeDBAPI('qmark') - self.assertEqual(util.sql_insert(dbapi, 'widgets', ('len', 'wid')), - "INSERT INTO widgets (len, wid) VALUES (?,?)") - - def test_sql_insert_single_numeric(self): - dbapi = FakeDBAPI('numeric') - self.assertEqual(util.sql_insert(dbapi, 'colors', ('color',)), - "INSERT INTO colors (color) VALUES (:1)") - - def test_sql_insert_multi_numeric(self): - dbapi = FakeDBAPI('numeric') - self.assertEqual(util.sql_insert(dbapi, 'widgets', ('len', 'wid')), - "INSERT INTO widgets (len, wid) VALUES (:1,:2)") - - def test_sql_insert_single_format(self): - dbapi = FakeDBAPI('format') - self.assertEqual(util.sql_insert(dbapi, 'colors', ('color',)), - "INSERT INTO colors (color) VALUES (%s)") - - def test_sql_insert_multi_format(self): - dbapi = FakeDBAPI('format') - self.assertEqual(util.sql_insert(dbapi, 'widgets', ('len', 'wid')), - "INSERT INTO widgets (len, wid) VALUES (%s,%s)") - diff --git a/master/buildbot/test/util/change_import.py b/master/buildbot/test/util/change_import.py new file mode 100644 index 00000000000..ea9e4a67f61 --- /dev/null +++ b/master/buildbot/test/util/change_import.py @@ -0,0 +1,56 @@ +# This file is part of Buildbot. 
Buildbot is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Buildbot Team Members
+
+import os
+import shutil
+import cPickle
+from buildbot.test.util import db
+
+from buildbot.changes.changes import Change, OldChangeMaster
+
+class ChangeImportMixin(db.RealDatabaseMixin):
+    """
+    We have a number of tests that examine the results of importing
+    particular flavors of Change objects.  This mixin pickles those changes
+    into a changes.pck in the basedir, just as an old buildmaster would.
+
+    This is a subclass of RealDatabaseMixin, so do not inherit from that class
+    separately!
+
+    >>> self.make_pickle(self.make_change(who=u'jimmy'), self.make_change(who='johnny'))
+    """
+    def make_pickle(self, *changes, **kwargs):
+        recode_fn = kwargs.pop('recode_fn', None)
+        cm = OldChangeMaster()
+        cm.changes = changes
+        if recode_fn:
+            recode_fn(cm)
+        f = open(self.changes_pickle, "wb")
+        cPickle.dump(cm, f)
+        f.close()
+
+    def make_change(self, **kwargs):
+        return Change(**kwargs)
+
+    def setUpChangeImport(self):
+        self.basedir = os.path.abspath("basedir")
+        if os.path.exists(self.basedir):
+            shutil.rmtree(self.basedir)
+        os.makedirs(self.basedir)
+        self.changes_pickle = os.path.join(self.basedir, "changes.pck")
+        self.setUpRealDatabase()
+
+    def tearDownChangeImport(self):
+        self.tearDownRealDatabase()
+        if os.path.exists(self.basedir):
+            shutil.rmtree(self.basedir)
diff --git a/master/buildbot/test/util/db.py b/master/buildbot/test/util/db.py
new file mode 100644
index 00000000000..de49fc3d97e
--- /dev/null
+++ b/master/buildbot/test/util/db.py
@@ -0,0 +1,58 @@
+# This file is part of Buildbot.  Buildbot is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Buildbot Team Members
+
+import os
+import sqlalchemy
+from sqlalchemy.schema import MetaData
+from twisted.python import log
+
+class RealDatabaseMixin(object):
+    """
+    A class that sets up a real database for testing.  This sets self.db_url
+    to the URL for the database.  By default this is an in-memory SQLite
+    database (but see the XXX note in setUpRealDatabase); if the
+    BUILDBOT_TEST_DB_URL environment variable is set, that database is used
+    instead, being careful to clean out *all* tables before and after the
+    tests are run - so each test starts with a clean database.
+    """
+    def _clean_database(self):
+        log.msg("cleaning database %s" % self.db_url)
+        engine = sqlalchemy.create_engine(self.db_url)
+
+        # get a list of all of the tables..
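+        # (reflect() loads the table definitions from the live database into
+        # the MetaData, and sorted_tables orders them parent-first by
+        # foreign-key dependency, so dropping in reverse order is safe)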
+        meta = MetaData()
+        meta.reflect(bind=engine)
+
+        # and drop them!
+        for table in reversed(meta.sorted_tables):
+            log.msg(" dropping table %s" % table)
+            table.drop(engine)
+
+        engine.dispose()
+
+    def setUpRealDatabase(self):
+        memory = 'sqlite://'
+        self.db_url = os.environ.get('BUILDBOT_TEST_DB_URL', 'sqlite:///%s' % (os.path.abspath('test.db'))) ### XXX TEMPORARY until sqlalchemification is complete
+        self._using_memory_db = (self.db_url == memory)
+
+        if not self._using_memory_db:
+            self._clean_database()
+
+    def tearDownRealDatabase(self):
+        if not self._using_memory_db:
+            self._clean_database()
diff --git a/master/buildbot/test/util/threads.py b/master/buildbot/test/util/threads.py
deleted file mode 100644
index 86b5a5cf6a2..00000000000
--- a/master/buildbot/test/util/threads.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file is part of Buildbot.  Buildbot is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Buildbot Team Members
-
-import threading
-import time
-
-from twisted.python import log
-
-class ThreadLeakMixin(object):
-    """
-    Monitor for leaking thread pools. Just call the setUp and tearDown methods!
-    """
-    def setUpThreadLeak(self):
-        self.start_thdcount = len(threading.enumerate())
-
-    def tearDownThreadLeak(self):
-        # double-check we haven't left a ThreadPool open.  Sometimes, threads
-        # take a while to go away, so this will wait up to 5s for that to occur
-        for _ in xrange(5):
-            cur_thdcount = len(threading.enumerate())
-            if cur_thdcount - self.start_thdcount < 1:
-                return
-            log.msg("threadcount: %d (start) %d (now)" % (self.start_thdcount, cur_thdcount))
-            time.sleep(1)
-        self.fail("leaked %d threads" % (cur_thdcount - self.start_thdcount))
diff --git a/master/buildbot/util/__init__.py b/master/buildbot/util/__init__.py
index 7bc1863be71..fc7c534224b 100644
--- a/master/buildbot/util/__init__.py
+++ b/master/buildbot/util/__init__.py
@@ -37,7 +37,24 @@ def key_func(item):
     l = [ i[1] for i in keyed_l ]
     return l
 
+def flatten(l):
+    """Flatten one level of nested lists into a single-level list, e.g.,
+    [['a', 'b'], ['c']] -> ['a', 'b', 'c'].  Note that flattening only
+    happens if the *first* element is itself a list; otherwise l is
+    returned unchanged."""
+    if l and type(l[0]) == list:
+        rv = []
+        for e in l:
+            if type(e) == list:
+                rv.extend(e)
+            else:
+                rv.append(e)
+        return rv
+    else:
+        return l
+
 def now(_reactor=None):
+    """Get the time, using reactor.seconds or time.time"""
     if _reactor and hasattr(_reactor, "seconds"):
         return _reactor.seconds()
     else:
diff --git a/master/docs/developer.texinfo b/master/docs/developer.texinfo
index ef0aff91fc8..7eaab0a7b4a 100644
--- a/master/docs/developer.texinfo
+++ b/master/docs/developer.texinfo
@@ -168,22 +168,9 @@ instead of a more complex conditional import of @code{simplejson} or
 @node The Database
 @section The Database
 
-@dvindex buildbot.db.dbspec.DBSpec
 @dvindex buildbot.db.connector.DBConnector
 
-Buildbot stores its state in a database, using the Python DBAPI to access it.
-
-A database is specified with the @code{buildbot.db.dbspec.DBSpec} class, which
-encapsulates all of the parameters necessary to create a DBAPI connection.  The
-DBSpec class can create either a single synchronous connection, or a twisted
-adbapi connection pool.
-
-Most uses of the database in Buildbot are done through the
-@code{buildbot.db.connector.DBConnector} class, which wraps the DBAPI to
-provide purpose-specific functions.
-
-The database schema is managed by a special class, described in the next
-section.
+TODO
 
 @menu
 * Database Schema::
diff --git a/master/setup.py b/master/setup.py
index 17ea75b3be4..31131442d77 100755
--- a/master/setup.py
+++ b/master/setup.py
@@ -223,7 +223,6 @@ def make_release_tree(self, base_dir, files):
         "buildbot.schedulers",
         "buildbot.scripts",
         "buildbot.db",
-        "buildbot.db.schema",
         "buildbot.util",
         "buildbot.test",
         "buildbot.test.fake",
@@ -231,18 +230,28 @@
         "buildbot.test.util",
         "buildbot.test.regressions",
     ],
-    'data_files': [("buildbot", ["buildbot/buildbot.png"]),
-                   include("buildbot/db/schema", "*.sql"),
-                   ("buildbot/clients", ["buildbot/clients/debug.glade"]),
-                   ("buildbot/status/web/files",
-                    ["buildbot/status/web/files/default.css",
-                     "buildbot/status/web/files/bg_gradient.jpg",
-                     "buildbot/status/web/files/robots.txt",
-                     "buildbot/status/web/files/favicon.ico",
-                    ]),
+    'data_files': [
+        ("buildbot", [
+            "buildbot/buildbot.png",
+        ]),
+        ("buildbot/db/migrate", [
+            "buildbot/db/migrate/migrate.cfg",
+        ]),
+        include("buildbot/db/migrate/versions", "*.py"),
+        ("buildbot/clients", [
+            "buildbot/clients/debug.glade",
+        ]),
+        ("buildbot/status/web/files", [
+            "buildbot/status/web/files/default.css",
+            "buildbot/status/web/files/bg_gradient.jpg",
+            "buildbot/status/web/files/robots.txt",
+            "buildbot/status/web/files/favicon.ico",
+        ]),
         include("buildbot/status/web/templates", '*.html'),
         include("buildbot/status/web/templates", '*.xml'),
-    ("buildbot/scripts", ["buildbot/scripts/sample.cfg"]),
+        ("buildbot/scripts", [
+            "buildbot/scripts/sample.cfg",
+        ]),
     ],
     'scripts': scripts,
     'cmdclass': {'install_data': install_data_twisted,
@@ -271,7 +280,9 @@ def make_release_tree(self, base_dir, files):
 setup_args['install_requires'] = [
     'twisted >= 2.0.0',
     'Jinja2 >= 2.1',
-    'sqlalchemy >= 0.6'
+    'sqlalchemy >= 0.6',
+    # buildbot depends on sqlalchemy-migrate internals.  See buildbot.db.model.
+    'sqlalchemy-migrate == 0.6',
 ]
 
 # Python-2.6 and up includes json
 if not py_26: