diff --git a/datajoint/__init__.py b/datajoint/__init__.py
index e3a8007a6..b562aa8d1 100644
--- a/datajoint/__init__.py
+++ b/datajoint/__init__.py
@@ -17,24 +17,6 @@ class DataJointError(Exception):
pass
-class TransactionError(DataJointError):
- """
- Base class for errors specific to DataJoint internal operation.
- """
- def __init__(self, msg, f, args=None, kwargs=None):
- super(TransactionError, self).__init__(msg)
- self.operations = (f, args if args is not None else tuple(),
- kwargs if kwargs is not None else {})
-
- def resolve(self):
- f, args, kwargs = self.operations
- return f(*args, **kwargs)
-
- @property
- def culprit(self):
- return self.operations[0].__name__
-
-
# ----------- loads local configuration from file ----------------
from .settings import Config, CONFIGVAR, LOCALCONFIG, logger, log_levels
config = Config()
@@ -55,9 +37,10 @@ def culprit(self):
# ------------- flatten import hierarchy -------------------------
from .connection import conn, Connection
-from .user_relations import Manual, Lookup, Imported, Computed
-from .abstract_relation import Relation
+from .relation import Relation
+from .user_relations import Manual, Lookup, Imported, Computed, Subordinate
from .autopopulate import AutoPopulate
from . import blob
from .relational_operand import Not
-from .heading import Heading
\ No newline at end of file
+from .heading import Heading
+from .relation import schema
\ No newline at end of file
diff --git a/datajoint/abstract_relation.py b/datajoint/abstract_relation.py
deleted file mode 100644
index da191fef6..000000000
--- a/datajoint/abstract_relation.py
+++ /dev/null
@@ -1,422 +0,0 @@
-from collections.abc import MutableMapping, Mapping
-import numpy as np
-import logging
-import re
-import abc
-
-from . import DataJointError, config, TransactionError
-from .relational_operand import RelationalOperand
-from .blob import pack
-from .utils import user_choice
-from .parsing import parse_attribute_definition, field_to_sql, parse_index_definition
-from .heading import Heading
-
-logger = logging.getLogger(__name__)
-
-
-class Relation(RelationalOperand, metaclass=abc.ABCMeta):
- """
- Relation is an abstract class that represents a base relation, i.e. a table in the database.
- To make it a concrete class, override the abstract properties specifying the connection,
- table name, database, context, and definition.
- A Relation implements insert and delete methods in addition to inherited relational operators.
- It also loads table heading and dependencies from the database.
- It also handles the table declaration based on its definition property
- """
-
- __heading = None
-
- # ---------- abstract properties ------------ #
- @property
- @abc.abstractmethod
- def table_name(self):
- """
- :return: the name of the table in the database
- """
- pass
-
- @property
- @abc.abstractmethod
- def database(self):
- """
- :return: string containing the database name on the server
- """
- pass
-
- @property
- @abc.abstractmethod
- def definition(self):
- """
- :return: a string containing the table definition using the DataJoint DDL
- """
- pass
-
- @property
- @abc.abstractmethod
- def context(self):
- """
- :return: a dict with other relations that can be referenced by foreign keys
- """
- pass
-
- # --------- base relation functionality --------- #
- @property
- def is_declared(self):
- if self.__heading is not None:
- return True
- cur = self.query(
- 'SHOW TABLE STATUS FROM `{database}` WHERE name="{table_name}"'.format(
- table_name=self.table_name))
- return cur.rowcount == 1
-
- @property
- def heading(self):
- """
- Required by relational operand
- :return: a datajoint.Heading object
- """
- if self.__heading is None:
- if not self.is_declared and self.definition:
- self.declare()
- if self.is_declared:
- self.__heading = Heading.init_from_database(
- self.connection, self.database, self.table_name)
- return self.__heading
-
- @property
- def from_clause(self):
- """
- Required by the Relational class, this property specifies the contents of the FROM clause
- for the SQL SELECT statements.
- :return:
- """
- return '`%s`.`%s`' % (self.database, self.table_name)
-
- def declare(self):
- """
- Declare the table in database if it doesn't already exist.
-
- :raises: DataJointError if the table cannot be declared.
- """
- if not self.is_declared:
- self._declare()
- # verify that declaration completed successfully
- if not self.is_declared:
- raise DataJointError(
- 'Relation could not be declared for %s' % self.class_name)
-
- def iter_insert(self, rows, **kwargs):
- """
- Inserts an entire batch of entries. Additional keyword arguments are passed to insert.
-
- :param iter: Must be an iterator that generates a sequence of valid arguments for insert.
- """
- for row in rows:
- self.insert(row, **kwargs)
-
- def batch_insert(self, data, **kwargs):
- """
- Inserts an entire batch of entries. Additional keyword arguments are passed to insert.
-
- :param data: must be iterable, each row must be a valid argument for insert
- """
- self.iter_insert(data.__iter__(), **kwargs)
-
- def insert(self, tup, ignore_errors=False, replace=False):
- """
- Insert one data record or one Mapping (like a dictionary).
-
- :param tup: Data record, or a Mapping (like a dictionary).
- :param ignore_errors=False: Ignores errors if True.
- :param replace=False: Replaces data tuple if True.
-
- Example::
-
- b = djtest.Subject()
- b.insert(dict(subject_id = 7, species="mouse",\\
- real_id = 1007, date_of_birth = "2014-09-01"))
- """
-
- heading = self.heading
- if isinstance(tup, np.void):
- for fieldname in tup.dtype.fields:
- if fieldname not in heading:
- raise KeyError(u'{0:s} is not in the attribute list'.format(fieldname, ))
- value_list = ','.join([repr(tup[name]) if not heading[name].is_blob else '%s'
- for name in heading if name in tup.dtype.fields])
-
- args = tuple(pack(tup[name]) for name in heading
- if name in tup.dtype.fields and heading[name].is_blob)
- attribute_list = '`' + '`,`'.join(
- [q for q in heading if q in tup.dtype.fields]) + '`'
- elif isinstance(tup, Mapping):
- for fieldname in tup.keys():
- if fieldname not in heading:
- raise KeyError(u'{0:s} is not in the attribute list'.format(fieldname, ))
- value_list = ','.join([repr(tup[name]) if not heading[name].is_blob else '%s'
- for name in heading if name in tup])
- args = tuple(pack(tup[name]) for name in heading
- if name in tup and heading[name].is_blob)
- attribute_list = '`' + '`,`'.join(
- [name for name in heading if name in tup]) + '`'
- else:
- raise DataJointError('Datatype %s cannot be inserted' % type(tup))
- if replace:
- sql = 'REPLACE'
- elif ignore_errors:
- sql = 'INSERT IGNORE'
- else:
- sql = 'INSERT'
- sql += " INTO %s (%s) VALUES (%s)" % (self.full_table_name,
- attribute_list, value_list)
- logger.info(sql)
- self.connection.query(sql, args=args)
-
- def delete(self):
- if not config['safemode'] or user_choice(
- "You are about to delete data from a table. This operation cannot be undone.\n"
- "Proceed?", default='no') == 'yes':
- self.connection.query('DELETE FROM ' + self.from_clause + self.where_clause) # TODO: make cascading (issue #15)
-
- def drop(self):
- """
- Drops the table associated to this object.
- """
- if self.is_declared:
- if not config['safemode'] or user_choice(
- "You are about to drop an entire table. This operation cannot be undone.\n"
- "Proceed?", default='no') == 'yes':
- self.connection.query('DROP TABLE %s' % self.full_table_name) # TODO: make cascading (issue #16)
- self.connection.clear_dependencies(dbname=self.dbname)
- self.connection.load_headings(dbname=self.dbname, force=True)
- logger.info("Dropped table %s" % self.full_table_name)
-
- @property
- def size_on_disk(self):
- """
- :return: size of data and indices in MiB taken by the table on the storage device
- """
- cur = self.connection.query(
- 'SHOW TABLE STATUS FROM `{dbname}` WHERE NAME="{table}"'.format(
- dbname=self.dbname, table=self.table_name), as_dict=True)
- ret = cur.fetchone()
- return (ret['Data_length'] + ret['Index_length'])/1024**2
-
- def set_table_comment(self, comment):
- """
- Update the table comment in the table definition.
- :param comment: new comment as string
- """
- # TODO: add verification procedure (github issue #24)
- self.alter('COMMENT="%s"' % comment)
-
- def add_attribute(self, definition, after=None):
- """
- Add a new attribute to the table. A full line from the table definition
- is passed in as definition.
-
- The definition can specify where to place the new attribute. Use after=None
- to add the attribute as the first attribute or after='attribute' to place it
- after an existing attribute.
-
- :param definition: table definition
- :param after=None: After which attribute of the table the new attribute is inserted.
- If None, the attribute is inserted in front.
- """
- position = ' FIRST' if after is None else (
- ' AFTER %s' % after if after else '')
- sql = field_to_sql(parse_attribute_definition(definition))
- self._alter('ADD COLUMN %s%s' % (sql[:-2], position))
-
- def drop_attribute(self, attr_name):
- """
- Drops the attribute attrName from this table.
-
- :param attr_name: Name of the attribute that is dropped.
- """
- if not config['safemode'] or user_choice(
- "You are about to drop an attribute from a table."
- "This operation cannot be undone.\n"
- "Proceed?", default='no') == 'yes':
- self._alter('DROP COLUMN `%s`' % attr_name)
-
- def alter_attribute(self, attr_name, new_definition):
- """
- Alter the definition of the field attr_name in this table using the new definition.
-
- :param attr_name: field that is redefined
- :param new_definition: new definition of the field
- """
- sql = field_to_sql(parse_attribute_definition(new_definition))
- self._alter('CHANGE COLUMN `%s` %s' % (attr_name, sql[:-2]))
-
- def erd(self, subset=None):
- """
- Plot the schema's entity relationship diagram (ERD).
- """
-
- def _alter(self, alter_statement):
- """
- Execute ALTER TABLE statement for this table. The schema
- will be reloaded within the connection object.
-
- :param alter_statement: alter statement
- """
- if self._conn.in_transaction:
- raise TransactionError(
- u"_alter is currently in transaction. Operation not allowed to avoid implicit commits.",
- self._alter, args=(alter_statement,))
-
- sql = 'ALTER TABLE %s %s' % (self.full_table_name, alter_statement)
- self.connection.query(sql)
- self.connection.load_headings(self.dbname, force=True)
- # TODO: place table definition sync mechanism
-
- @staticmethod
- def _declare(self):
- """
- Declares the table in the database if no table in the database matches this object.
- """
- if self.connection.in_transaction:
- raise TransactionError(
- u"_declare is currently in transaction. Operation not allowed to avoid implicit commits.", self._declare)
-
- if not self.definition: # if empty definition was supplied
- raise DataJointError('Table definition is missing!')
- table_info, parents, referenced, field_defs, index_defs = self._parse_declaration()
-
- sql = 'CREATE TABLE %s (\n' % self.full_table_name
-
- # add inherited primary key fields
- primary_key_fields = set()
- non_key_fields = set()
- for p in parents:
- for key in p.primary_key:
- field = p.heading[key]
- if field.name not in primary_key_fields:
- primary_key_fields.add(field.name)
- sql += field_to_sql(field)
- else:
- logger.debug('Field definition of {} in {} ignored'.format(
- field.name, p.full_class_name))
-
- # add newly defined primary key fields
- for field in (f for f in field_defs if f.in_key):
- if field.nullable:
- raise DataJointError('Primary key attribute {} cannot be nullable'.format(
- field.name))
- if field.name in primary_key_fields:
- raise DataJointError('Duplicate declaration of the primary attribute {key}. '
- 'Ensure that the attribute is not already declared '
- 'in referenced tables'.format(key=field.name))
- primary_key_fields.add(field.name)
- sql += field_to_sql(field)
-
- # add secondary foreign key attributes
- for r in referenced:
- for key in r.primary_key:
- field = r.heading[key]
- if field.name not in primary_key_fields | non_key_fields:
- non_key_fields.add(field.name)
- sql += field_to_sql(field)
-
- # add dependent attributes
- for field in (f for f in field_defs if not f.in_key):
- non_key_fields.add(field.name)
- sql += field_to_sql(field)
-
- # add primary key declaration
- assert len(primary_key_fields) > 0, 'table must have a primary key'
- keys = ', '.join(primary_key_fields)
- sql += 'PRIMARY KEY (%s),\n' % keys
-
- # add foreign key declarations
- for ref in parents + referenced:
- keys = ', '.join(ref.primary_key)
- sql += 'FOREIGN KEY (%s) REFERENCES %s (%s) ON UPDATE CASCADE ON DELETE RESTRICT,\n' % \
- (keys, ref.full_table_name, keys)
-
- # add secondary index declarations
- # gather implicit indexes due to foreign keys first
- implicit_indices = []
- for fk_source in parents + referenced:
- implicit_indices.append(fk_source.primary_key)
-
- # for index in indexDefs:
- # TODO: finish this up...
-
- # close the declaration
- sql = '%s\n) ENGINE = InnoDB, COMMENT "%s"' % (
- sql[:-2], table_info['comment'])
-
- # make sure that the table does not alredy exist
- self.load_heading()
- if not self.is_declared:
- # execute declaration
- logger.debug('\n\n' + sql + '\n\n')
- self.connection.query(sql)
- self.load_heading()
-
- def _parse_declaration(self):
- """
- Parse declaration and create new SQL table accordingly.
- """
- parents = []
- referenced = []
- index_defs = []
- field_defs = []
- declaration = re.split(r'\s*\n\s*', self.definition.strip())
-
- # remove comment lines
- declaration = [x for x in declaration if not x.startswith('#')]
- ptrn = """
- \#\s*(?P.*)$ # comment
- """
- p = re.compile(ptrn, re.X)
- table_info = p.search(declaration[0]).groupdict()
-
- #table_info['tier'] = Role[table_info['tier']] # convert into enum
-
- in_key = True # parse primary keys
- attribute_regexp = re.compile("""
- ^[a-z][a-z\d_]*\s* # name
- (=\s*\S+(\s+\S+)*\s*)? # optional defaults
- :\s*\w.*$ # type, comment
- """, re.I + re.X) # ignore case and verbose
-
- for line in declaration[1:]:
- if line.startswith('---'):
- in_key = False # start parsing non-PK fields
- elif line.startswith('->'):
- # foreign key
- ref_name = line[2:].strip()
- ref_list = parents if in_key else referenced
- ref_list.append(self.lookup_name(ref_name))
- elif re.match(r'^(unique\s+)?index[^:]*$', line, re.I):
- index_defs.append(parse_index_definition(line))
- elif attribute_regexp.match(line):
- field_defs.append(parse_attribute_definition(line, in_key))
- else:
- raise DataJointError(
- 'Invalid table declaration line "%s"' % line)
-
- return table_info, parents, referenced, field_defs, index_defs
-
- def lookup_name(self, name):
- """
- Lookup the referenced name in the context dictionary
-
- e.g. for reference `common.Animals`, it will first check if `context` dictionary contains key
- `common`. If found, it then checks for attribute `Animals` in `common`, and returns the result.
- """
- parts = name.strip().split('.')
- try:
- ref = self.context.get(parts[0])
- for attr in parts[1:]:
- ref = getattr(ref, attr)
- except (KeyError, AttributeError):
- raise DataJointError(
- 'Foreign key reference to %s could not be resolved.'
- 'Please make sure the name exists'
- 'in the context of the class' % name)
- return ref
\ No newline at end of file
diff --git a/datajoint/autopopulate.py b/datajoint/autopopulate.py
index a266b62f9..cb4c0d673 100644
--- a/datajoint/autopopulate.py
+++ b/datajoint/autopopulate.py
@@ -1,5 +1,5 @@
from .relational_operand import RelationalOperand
-from . import DataJointError, TransactionError, Relation
+from . import DataJointError, Relation
import abc
import logging
@@ -37,7 +37,7 @@ def _make_tuples(self, key):
def target(self):
return self
- def populate(self, restriction=None, suppress_errors=False, reserve_jobs=False, max_attempts=10):
+ def populate(self, restriction=None, suppress_errors=False, reserve_jobs=False):
"""
rel.populate() calls rel._make_tuples(key) for every primary key in self.populate_relation
for which there is not already a tuple in rel.
@@ -45,7 +45,6 @@ def populate(self, restriction=None, suppress_errors=False, reserve_jobs=False,
:param restriction: restriction on rel.populate_relation - target
:param suppress_errors: suppresses error if true
:param reserve_jobs: currently not implemented
- :param max_attempts: maximal number of times a TransactionError is caught before populate gives up
"""
assert not reserve_jobs, NotImplemented # issue #5
@@ -53,40 +52,30 @@ def populate(self, restriction=None, suppress_errors=False, reserve_jobs=False,
if not isinstance(self.populate_relation, RelationalOperand):
raise DataJointError('Invalid populate_relation value')
- self.conn.cancel_transaction() # rollback previous transaction, if any
+ self.connection.cancel_transaction() # rollback previous transaction, if any
if not isinstance(self, Relation):
- raise DataJointError('Autopopulate is a mixin for Relation and must therefore subclass Relation')
+ raise DataJointError(
+ 'AutoPopulate is a mixin for Relation and must therefore subclass Relation')
unpopulated = (self.populate_relation - self.target) & restriction
for key in unpopulated.project():
- self.conn.start_transaction()
+ self.connection.start_transaction()
if key in self.target: # already populated
- self.conn.cancel_transaction()
+ self.connection.cancel_transaction()
else:
logger.info('Populating: ' + str(key))
try:
- for attempts in range(max_attempts):
- try:
- self._make_tuples(dict(key))
- break
- except TransactionError as tr_err:
- self.conn.cancel_transaction()
- tr_err.resolve()
- self.conn.start_transaction()
- logger.info('Transaction error in {0:s}.'.format(tr_err.culprit))
- else:
- raise DataJointError(
- '%s._make_tuples failed after %i attempts, giving up' % (self.__class__,max_attempts))
+ self._make_tuples(dict(key))
except Exception as error:
- self.conn.cancel_transaction()
+ self.connection.cancel_transaction()
if not suppress_errors:
raise
else:
logger.error(error)
error_list.append((key, error))
else:
- self.conn.commit_transaction()
+ self.connection.commit_transaction()
logger.info('Done populating.')
return error_list
diff --git a/datajoint/connection.py b/datajoint/connection.py
index 52dae7598..0ba65b532 100644
--- a/datajoint/connection.py
+++ b/datajoint/connection.py
@@ -1,11 +1,6 @@
import pymysql
-import re
-from .utils import to_camel_case
from . import DataJointError
-from .heading import Heading
-from .settings import prefix_to_role
import logging
-from .erd import DBConnGraph
from . import config
logger = logging.getLogger(__name__)
@@ -17,7 +12,7 @@ def conn_container():
"""
_connection = None # persistent connection object used by dj.conn()
- def conn_function(host=None, user=None, passwd=None, init_fun=None, reset=False):
+ def conn_function(host=None, user=None, passwd=None, init_fun=None, reset=False): # TODO: thin wrapping layer to mimic singleton
"""
Manage a persistent connection object.
This is one of several ways to configure and access a datajoint connection.
diff --git a/datajoint/declare.py b/datajoint/declare.py
new file mode 100644
index 000000000..c0c8e44a9
--- /dev/null
+++ b/datajoint/declare.py
@@ -0,0 +1,113 @@
+import re
+import pyparsing as pp
+import logging
+
+from . import DataJointError
+
+
+logger = logging.getLogger(__name__)
+
+
+
+def declare(full_table_name, definition, context):
+ """
+ Parse declaration and create new SQL table accordingly.
+ """
+ # split definition into lines
+ definition = re.split(r'\s*\n\s*', definition.strip())
+
+ table_comment = definition.pop(0)[1:].strip() if definition[0].startswith('#') else ''
+
+ in_key = True # parse primary keys
+ primary_key = []
+ attributes = []
+ attribute_sql = []
+ foreign_key_sql = []
+ index_sql = []
+
+ for line in definition:
+ if line.startswith('#'): # additional comments are ignored
+ pass
+ elif line.startswith('---'):
+ in_key = False # start parsing dependent attributes
+ elif line.startswith('->'):
+ # foreign key
+ ref = eval(line[2:], context)()
+ foreign_key_sql.append(
+ 'FOREIGN KEY ({primary_key})'
+ ' REFERENCES {ref} ({primary_key})'
+ ' ON UPDATE CASCADE ON DELETE RESTRICT'.format(
+ primary_key='`' + '`,`'.join(ref.primary_key) + '`', ref=ref.full_table_name)
+ )
+ for name in ref.primary_key:
+ if in_key and name not in primary_key:
+ primary_key.append(name)
+ if name not in attributes:
+ attributes.append(name)
+ attribute_sql.append(ref.heading[name].sql())
+ elif re.match(r'^(unique\s+)?index[^:]*$', line, re.I): # index
+ index_sql.append(line) # the SQL syntax is identical to DataJoint's
+ else:
+ name, sql = compile_attribute(line, in_key)
+ if in_key and name not in primary_key:
+ primary_key.append(name)
+ if name not in attributes:
+ attributes.append(name)
+ attribute_sql.append(sql)
+
+ # compile SQL
+ if not primary_key:
+ raise DataJointError('Table must have a primary key')
+ sql = 'CREATE TABLE %s (\n ' % full_table_name
+ sql += ',\n '.join(attribute_sql)
+ sql += ',\n PRIMARY KEY (`' + '`,`'.join(primary_key) + '`)'
+ if foreign_key_sql:
+ sql += ', \n' + ', \n'.join(foreign_key_sql)
+ if index_sql:
+ sql += ', \n' + ', \n'.join(index_sql)
+ sql += '\n) ENGINE = InnoDB, COMMENT "%s"' % table_comment
+ return sql
+
+
+
+
+def compile_attribute(line, in_key=False):
+ """
+ Convert attribute definition from DataJoint format to SQL
+ :param line: attribution line
+ :param in_key: set to True if attribute is in primary key set
+ :returns: (name, sql) -- attribute name and sql code for its declaration
+ """
+ quoted = pp.Or(pp.QuotedString('"'), pp.QuotedString("'"))
+ colon = pp.Literal(':').suppress()
+ attribute_name = pp.Word(pp.srange('[a-z]'), pp.srange('[a-z0-9_]')).setResultsName('name')
+
+ data_type = pp.Combine(pp.Word(pp.alphas)+pp.SkipTo("#", ignore=quoted)).setResultsName('type')
+ default = pp.Literal('=').suppress() + pp.SkipTo(colon, ignore=quoted).setResultsName('default')
+ comment = pp.Literal('#').suppress() + pp.restOfLine.setResultsName('comment')
+
+ attribute_parser = attribute_name + pp.Optional(default) + colon + data_type + comment
+
+ match = attribute_parser.parseString(line+'#', parseAll=True)
+ match['comment'] = match['comment'].rstrip('#')
+ if 'default' not in match:
+ match['default'] = ''
+ match = {k: v.strip() for k, v in match.items()}
+ match['nullable'] = match['default'].lower() == 'null'
+
+ literals = ['CURRENT_TIMESTAMP'] # not to be enclosed in quotes
+ if match['nullable']:
+ if in_key:
+ raise DataJointError('Primary key attributes cannot be nullable in line %s' % line)
+ match['default'] = 'DEFAULT NULL' # nullable attributes default to null
+ else:
+ if match['default']:
+ quote = match['default'].upper() not in literals and match['default'][0] not in '"\''
+ match['default'] = ('NOT NULL DEFAULT ' +
+ ('"%s"' if quote else "%s") % match['default'])
+ else:
+ match['default'] = 'NOT NULL'
+ match['comment'] = match['comment'].replace('"', '\\"') # escape double quotes in comment
+ sql = ('`{name}` {type} {default}' + (' COMMENT "{comment}"' if match['comment'] else '')
+ ).format(**match)
+ return match['name'], sql
diff --git a/datajoint/heading.py b/datajoint/heading.py
index 73d6c30f2..5c332fff6 100644
--- a/datajoint/heading.py
+++ b/datajoint/heading.py
@@ -11,17 +11,46 @@ class Heading:
Heading contains the property attributes, which is an OrderedDict in which the keys are
the attribute names and the values are AttrTuples.
"""
- AttrTuple = namedtuple('AttrTuple',
+
+ class AttrTuple(namedtuple('AttrTuple',
('name', 'type', 'in_key', 'nullable', 'default',
'comment', 'autoincrement', 'numeric', 'string', 'is_blob',
- 'computation', 'dtype'))
- AttrTuple.as_dict = AttrTuple._asdict # renaming to make public
-
- def __init__(self, attributes):
+ 'computation', 'dtype'))):
+ def _asdict(self):
+ """
+ for some reason the inherited _asdict does not work after subclassing from namedtuple
+ """
+ return OrderedDict((name, self[i]) for i, name in enumerate(self._fields))
+
+ def sql(self):
+ """
+ Convert attribute tuple into its SQL CREATE TABLE clause.
+ :rtype : SQL code
+ """
+ literals = ['CURRENT_TIMESTAMP']
+ if self.nullable:
+ default = 'DEFAULT NULL'
+ else:
+ default = 'NOT NULL'
+ if self.default:
+ # enclose value in quotes except special SQL values or already enclosed
+ quote = self.default.upper() not in literals and self.default[0] not in '"\''
+ default += ' DEFAULT ' + ('"%s"' if quote else "%s") % self.default
+ if any((c in r'\"' for c in self.comment)):
+ raise DataJointError('Illegal characters in attribute comment "%s"' % self.comment)
+ return '`{name}` {type} {default} COMMENT "{comment}"'.format(
+ name=self.name, type=self.type, default=default, comment=self.comment)
+
+ def __init__(self, attributes=None):
"""
:param attributes: a list of dicts with the same keys as AttrTuple
"""
- self.attributes = OrderedDict([(q['name'], Heading.AttrTuple(**q)) for q in attributes])
+ if attributes:
+ attributes = OrderedDict([(q['name'], Heading.AttrTuple(**q)) for q in attributes])
+ self.attributes = attributes
+
+ def __bool__(self):
+ return self.attributes is not None
@property
def names(self):
@@ -52,12 +81,14 @@ def __getitem__(self, name):
return self.attributes[name]
def __repr__(self):
- autoincrement_string = {False: '', True: ' auto_increment'}
- return '\n'.join(['%-20s : %-28s # %s' % (
- k if v.default is None else '%s="%s"' % (k, v.default),
- '%s%s' % (v.type, autoincrement_string[v.autoincrement]),
- v.comment)
- for k, v in self.attributes.items()])
+ if self.attributes is None:
+ return 'Empty heading'
+ else:
+ return '\n'.join(['%-20s : %-28s # %s' % (
+ k if v.default is None else '%s="%s"' % (k, v.default),
+ '%s%s' % (v.type, 'auto_increment' if v.autoincrement else ''),
+ v.comment)
+ for k, v in self.attributes.items()])
@property
def as_dtype(self):
@@ -90,14 +121,14 @@ def items(self):
def __iter__(self):
return iter(self.attributes)
- @classmethod
- def init_from_database(cls, conn, database, table_name):
+ def init_from_database(self, conn, database, table_name):
"""
- initialize heading from a database table
+ initialize heading from a database table. The table must exist already.
"""
cur = conn.query(
'SHOW FULL COLUMNS FROM `{table_name}` IN `{database}`'.format(
table_name=table_name, database=database), as_dict=True)
+
attributes = cur.fetchall()
rename_map = {
@@ -163,8 +194,7 @@ def init_from_database(cls, conn, database, table_name):
t = re.sub(r' unsigned$', '', t) # remove unsigned
assert (t, is_unsigned) in numeric_types, 'dtype not found for type %s' % t
attr['dtype'] = numeric_types[(t, is_unsigned)]
-
- return cls(attributes)
+ self.attributes = OrderedDict([(q['name'], Heading.AttrTuple(**q)) for q in attributes])
def project(self, *attribute_list, **renamed_attributes):
"""
@@ -181,14 +211,14 @@ def project(self, *attribute_list, **renamed_attributes):
attribute_list = self.primary_key + [a for a in attribute_list if a not in self.primary_key]
# convert attribute_list into a list of dicts but exclude renamed attributes
- attribute_list = [v.as_dict() for k, v in self.attributes.items()
+ attribute_list = [v._asdict() for k, v in self.attributes.items()
if k in attribute_list and k not in renamed_attributes.values()]
# add renamed and computed attributes
for new_name, computation in renamed_attributes.items():
if computation in self.names:
# renamed attribute
- new_attr = self.attributes[computation].as_dict()
+ new_attr = self.attributes[computation]._asdict()
new_attr['name'] = new_name
new_attr['computation'] = '`' + computation + '`'
else:
@@ -215,14 +245,14 @@ def __add__(self, other):
join two headings
"""
assert isinstance(other, Heading)
- attribute_list = [v.as_dict() for v in self.attributes.values()]
+ attribute_list = [v._asdict() for v in self.attributes.values()]
for name in other.names:
if name not in self.names:
- attribute_list.append(other.attributes[name].as_dict())
+ attribute_list.append(other.attributes[name]._asdict())
return Heading(attribute_list)
def resolve(self):
"""
Remove attribute computations after they have been resolved in a subquery
"""
- return Heading([dict(v.as_dict(), computation=None) for v in self.attributes.values()])
\ No newline at end of file
+ return Heading([dict(v._asdict(), computation=None) for v in self.attributes.values()])
\ No newline at end of file
diff --git a/datajoint/parsing.py b/datajoint/parsing.py
deleted file mode 100644
index 85e367c96..000000000
--- a/datajoint/parsing.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import re
-from . import DataJointError
-from .heading import Heading
-
-
-def parse_attribute_definition(line, in_key=False):
- """
- Parse attribute definition line in the declaration and returns
- an attribute tuple.
-
- :param line: attribution line
- :param in_key: set to True if attribute is in primary key set
- :returns: attribute tuple
- """
- line = line.strip()
- attribute_regexp = re.compile("""
- ^(?P[a-z][a-z\d_]*)\s* # field name
- (=\s*(?P\S+(\s+\S+)*?)\s*)? # default value
- :\s*(?P\w[^\#]*[^\#\s])\s* # datatype
- (\#\s*(?P\S*(\s+\S+)*)\s*)?$ # comment
- """, re.X)
- m = attribute_regexp.match(line)
- if not m:
- raise DataJointError('Invalid field declaration "%s"' % line)
- attr_info = m.groupdict()
- if not attr_info['comment']:
- attr_info['comment'] = ''
- if not attr_info['default']:
- attr_info['default'] = ''
- attr_info['nullable'] = attr_info['default'].lower() == 'null'
- assert (not re.match(r'^bigint', attr_info['type'], re.I) or not attr_info['nullable']), \
- 'BIGINT attributes cannot be nullable in "%s"' % line
-
- return Heading.AttrTuple(
- in_key=in_key,
- autoincrement=None,
- numeric=None,
- string=None,
- is_blob=None,
- computation=None,
- dtype=None,
- **attr_info
- )
-
-
-def field_to_sql(field): # TODO move this into Attribute Tuple
- """
- Converts an attribute definition tuple into SQL code.
- :param field: attribute definition
- :rtype : SQL code
- """
- mysql_constants = ['CURRENT_TIMESTAMP']
- if field.nullable:
- default = 'DEFAULT NULL'
- else:
- default = 'NOT NULL'
- # if some default specified
- if field.default:
- # enclose value in quotes except special SQL values or already enclosed
- quote = field.default.upper() not in mysql_constants and field.default[0] not in '"\''
- default += ' DEFAULT ' + ('"%s"' if quote else "%s") % field.default
- if any((c in r'\"' for c in field.comment)):
- raise DataJointError('Illegal characters in attribute comment "%s"' % field.comment)
-
- return '`{name}` {type} {default} COMMENT "{comment}",\n'.format(
- name=field.name, type=field.type, default=default, comment=field.comment)
-
-
-def parse_index_definition(line):
- """
- Parses index definition.
-
- :param line: definition line
- :return: groupdict with index info
- """
- line = line.strip()
- index_regexp = re.compile("""
- ^(?PUNIQUE)?\s*INDEX\s* # [UNIQUE] INDEX
- \((?P[^\)]+)\)$ # (attr1, attr2)
- """, re.I + re.X)
- m = index_regexp.match(line)
- assert m, 'Invalid index declaration "%s"' % line
- index_info = m.groupdict()
- attributes = re.split(r'\s*,\s*', index_info['attributes'].strip())
- index_info['attributes'] = attributes
- assert len(attributes) == len(set(attributes)), \
- 'Duplicate attributes in index declaration "%s"' % line
- return index_info
diff --git a/datajoint/relation.py b/datajoint/relation.py
new file mode 100644
index 000000000..8f558831f
--- /dev/null
+++ b/datajoint/relation.py
@@ -0,0 +1,272 @@
+from collections import namedtuple
+from collections.abc import Mapping
+import numpy as np
+import logging
+import abc
+import pymysql
+
+from . import DataJointError, config, conn
+from .declare import declare, compile_attribute
+from .relational_operand import RelationalOperand
+from .blob import pack
+from .utils import user_choice
+from .heading import Heading
+
+logger = logging.getLogger(__name__)
+
+
+def schema(database, context, connection=None):
+ """
+ Returns a decorator that can be used to associate a Relation class to a database.
+
+ :param database: name of the database to associate the decorated class with
+ :param context: dictionary for looking up foreign keys references, usually set to locals()
+ :param connection: Connection object. Defaults to datajoint.conn()
+ :return: a decorator for Relation subclasses
+ """
+ if connection is None:
+ connection = conn()
+
+ # if the database does not exist, create it
+ cur = connection.query("SHOW DATABASES LIKE '{database}'".format(database=database))
+ if cur.rowcount == 0:
+ logger.info("Database `{database}` could not be found. "
+ "Attempting to create the database.".format(database=database))
+ try:
+ connection.query("CREATE DATABASE `{database}`".format(database=database))
+ logger.info('Created database `{database}`.'.format(database=database))
+ except pymysql.OperationalError:
+        raise DataJointError("Database named `{database}` was not defined, and "
+ "an attempt to create has failed. Check"
+ " permissions.".format(database=database))
+
+ def decorator(cls):
+ """
+ The decorator declares the table and binds the class to the database table
+ """
+ cls.database = database
+ cls._connection = connection
+ cls._heading = Heading()
+ instance = cls() if isinstance(cls, type) else cls
+ if not instance.heading:
+ connection.query(
+ declare(
+ full_table_name=instance.full_table_name,
+ definition=instance.definition,
+ context=context))
+ return cls
+
+ return decorator
+
+
+
+class Relation(RelationalOperand, metaclass=abc.ABCMeta):
+ """
+ Relation is an abstract class that represents a base relation, i.e. a table in the database.
+ To make it a concrete class, override the abstract properties specifying the connection,
+ table name, database, context, and definition.
+ A Relation implements insert and delete methods in addition to inherited relational operators.
+ """
+
+ # ---------- abstract properties ------------ #
+ @property
+ @abc.abstractmethod
+ def table_name(self):
+ """
+ :return: the name of the table in the database
+ """
+ raise NotImplementedError('Relation subclasses must define property table_name')
+
+ @property
+ @abc.abstractmethod
+ def definition(self):
+ """
+ :return: a string containing the table definition using the DataJoint DDL
+ """
+ pass
+
+ # -------------- required by RelationalOperand ----------------- #
+ @property
+ def connection(self):
+ return self._connection
+
+ @property
+ def heading(self):
+ if not self._heading and self.is_declared:
+ self._heading.init_from_database(self.connection, self.database, self.table_name)
+ return self._heading
+
+ @property
+ def from_clause(self):
+ """
+ :return: the FROM clause of SQL SELECT statements.
+ """
+ return self.full_table_name
+
+ def iter_insert(self, rows, **kwargs):
+ """
+ Inserts a collection of tuples. Additional keyword arguments are passed to insert.
+
+ :param iter: Must be an iterator that generates a sequence of valid arguments for insert.
+ """
+ for row in rows:
+ self.insert(row, **kwargs)
+
+ # --------- SQL functionality --------- #
+ @property
+ def is_declared(self):
+ cur = self.connection.query(
+            'SHOW TABLES in `{database}` LIKE "{table_name}"'.format(
+ database=self.database, table_name=self.table_name))
+        return cur.rowcount > 0
+
+ def batch_insert(self, data, **kwargs):
+ """
+ Inserts an entire batch of entries. Additional keyword arguments are passed to insert.
+
+ :param data: must be iterable, each row must be a valid argument for insert
+ """
+ self.iter_insert(data.__iter__(), **kwargs)
+
+ @property
+ def full_table_name(self):
+ return r"`{0:s}`.`{1:s}`".format(self.database, self.table_name)
+
+ def insert(self, tup, ignore_errors=False, replace=False):
+ """
+ Insert one data record or one Mapping (like a dictionary).
+
+ :param tup: Data record, or a Mapping (like a dictionary).
+ :param ignore_errors=False: Ignores errors if True.
+ :param replace=False: Replaces data tuple if True.
+
+ Example::
+
+ b = djtest.Subject()
+ b.insert(dict(subject_id = 7, species="mouse",\\
+ real_id = 1007, date_of_birth = "2014-09-01"))
+ """
+
+ heading = self.heading
+ if isinstance(tup, np.void):
+ for fieldname in tup.dtype.fields:
+ if fieldname not in heading:
+ raise KeyError(u'{0:s} is not in the attribute list'.format(fieldname, ))
+ value_list = ','.join([repr(tup[name]) if not heading[name].is_blob else '%s'
+ for name in heading if name in tup.dtype.fields])
+
+ args = tuple(pack(tup[name]) for name in heading
+ if name in tup.dtype.fields and heading[name].is_blob)
+ attribute_list = '`' + '`,`'.join(q for q in heading if q in tup.dtype.fields) + '`'
+ elif isinstance(tup, Mapping):
+ for fieldname in tup.keys():
+ if fieldname not in heading:
+ raise KeyError(u'{0:s} is not in the attribute list'.format(fieldname, ))
+ value_list = ','.join(repr(tup[name]) if not heading[name].is_blob else '%s'
+ for name in heading if name in tup)
+ args = tuple(pack(tup[name]) for name in heading
+ if name in tup and heading[name].is_blob)
+ attribute_list = '`' + '`,`'.join(name for name in heading if name in tup) + '`'
+ else:
+ raise DataJointError('Datatype %s cannot be inserted' % type(tup))
+ if replace:
+ sql = 'REPLACE'
+ elif ignore_errors:
+ sql = 'INSERT IGNORE'
+ else:
+ sql = 'INSERT'
+ sql += " INTO %s (%s) VALUES (%s)" % (self.from_clause, attribute_list, value_list)
+ logger.info(sql)
+ self.connection.query(sql, args=args)
+
+ def delete(self):
+ if not config['safemode'] or user_choice(
+ "You are about to delete data from a table. This operation cannot be undone.\n"
+ "Proceed?", default='no') == 'yes':
+ self.connection.query('DELETE FROM ' + self.from_clause + self.where_clause) # TODO: make cascading (issue #15)
+
+ def drop(self):
+ """
+ Drops the table associated to this class.
+ """
+ if self.is_declared:
+ if not config['safemode'] or user_choice(
+ "You are about to drop an entire table. This operation cannot be undone.\n"
+ "Proceed?", default='no') == 'yes':
+ self.connection.query('DROP TABLE %s' % self.full_table_name) # TODO: make cascading (issue #16)
+ # cls.connection.clear_dependencies(dbname=cls.dbname) #TODO: reimplement because clear_dependencies will be gone
+ # cls.connection.load_headings(dbname=cls.dbname, force=True) #TODO: reimplement because load_headings is gone
+ logger.info("Dropped table %s" % self.full_table_name)
+
+ def size_on_disk(self):
+ """
+        :return: size of data and indices in MiB taken by the table on the storage device
+ """
+ ret = self.connection.query(
+ 'SHOW TABLE STATUS FROM `{database}` WHERE NAME="{table}"'.format(
+ database=self.database, table=self.table_name), as_dict=True
+ ).fetchone()
+ return (ret['Data_length'] + ret['Index_length'])/1024**2
+
+ def set_table_comment(self, comment):
+ """
+ Update the table comment in the table definition.
+ :param comment: new comment as string
+ """
+ self._alter('COMMENT="%s"' % comment)
+
+ def add_attribute(self, definition, after=None):
+ """
+ Add a new attribute to the table. A full line from the table definition
+ is passed in as definition.
+
+ The definition can specify where to place the new attribute. Use after=None
+ to add the attribute as the first attribute or after='attribute' to place it
+ after an existing attribute.
+
+ :param definition: table definition
+ :param after=None: After which attribute of the table the new attribute is inserted.
+ If None, the attribute is inserted in front.
+ """
+ position = ' FIRST' if after is None else (
+ ' AFTER %s' % after if after else '')
+ sql = compile_attribute(definition)[1]
+ self._alter('ADD COLUMN %s%s' % (sql, position))
+
+ def drop_attribute(self, attribute_name):
+ """
+ Drops the attribute attrName from this table.
+ :param attribute_name: Name of the attribute that is dropped.
+ """
+ if not config['safemode'] or user_choice(
+ "You are about to drop an attribute from a table."
+ "This operation cannot be undone.\n"
+ "Proceed?", default='no') == 'yes':
+ self._alter('DROP COLUMN `%s`' % attribute_name)
+
+ def alter_attribute(self, attribute_name, definition):
+ """
+ Alter attribute definition
+
+ :param attribute_name: field that is redefined
+ :param definition: new definition of the field
+ """
+ sql = compile_attribute(definition)[1]
+ self._alter('CHANGE COLUMN `%s` %s' % (attribute_name, sql))
+
+ def erd(self, subset=None):
+ """
+ Plot the schema's entity relationship diagram (ERD).
+ """
+        raise NotImplementedError
+
+ def _alter(self, alter_statement):
+ """
+ Execute an ALTER TABLE statement.
+ :param alter_statement: alter statement
+ """
+ if self.connection.in_transaction:
+ raise DataJointError("Table definition cannot be altered during a transaction.")
+ sql = 'ALTER TABLE %s %s' % (self.full_table_name, alter_statement)
+ self.connection.query(sql)
+ self.heading.init_from_database(self.connection, self.database, self.table_name)
\ No newline at end of file
diff --git a/datajoint/relations.py b/datajoint/relations.py
deleted file mode 100644
index 08f3f8900..000000000
--- a/datajoint/relations.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import abc
-import logging
-from collections import namedtuple
-import pymysql
-from .connection import conn
-from .abstract_relation import Relation
-from . import DataJointError
-
-
-logger = logging.getLogger(__name__)
-
-
-SharedInfo = namedtuple(
- 'SharedInfo',
- ('database', 'context', 'connection', 'heading', 'parents', 'children', 'references', 'referenced'))
-
-
-def schema(database, context, connection=None):
- """
- Returns a schema decorator that can be used to associate a Relation class to a
- specific database with :param name:. Name reference to other tables in the table definition
- will be resolved by looking up the corresponding key entry in the passed in context dictionary.
- It is most common to set context equal to the return value of call to locals() in the module.
- For more details, please refer to the tutorial online.
-
- :param database: name of the database to associate the decorated class with
- :param context: dictionary used to resolve (any) name references within the table definition string
- :param connection: connection object to the database server. If ommited, will try to establish connection according to
- config values
- :return: a decorator function to be used on Relation derivative classes
- """
- if connection is None:
- connection = conn()
-
- # if the database does not exist, create it
- cur = connection.query("SHOW DATABASES LIKE '{database}'".format(database=database))
- if cur.rowcount == 0:
- logger.info("Database `{database}` could not be found. "
- "Attempting to create the database.".format(database=database))
- try:
- connection.query("CREATE DATABASE `{database}`".format(database=database))
- logger.info('Created database `{database}`.'.format(database=database))
- except pymysql.OperationalError:
- raise DataJointError("Database named `{database}` was not defined, and"
- "an attempt to create has failed. Check"
- " permissions.".format(database=database))
-
- def decorator(cls):
- cls._shared_info = SharedInfo(
- database=database,
- context=context,
- connection=connection,
- heading=None,
- parents=[],
- children=[],
- references=[],
- referenced=[]
- )
- return cls
-
- return decorator
-
-
-class ClassBoundRelation(Relation):
- """
- Abstract class for dedicated table classes.
- Subclasses of ClassBoundRelation are dedicated interfaces to a single table.
- The main purpose of ClassBoundRelation is to encapsulated sharedInfo containing the table heading
- and dependency information shared by all instances of
- """
-
- _shared_info = None
-
- def __init__(self):
- if self._shared_info is None:
- raise DataJointError('The class must define _shared_info')
-
- @property
- def database(self):
- return self._shared_info.database
-
- @property
- def connection(self):
- return self._shared_info.connection
-
- @property
- def context(self):
- return self._shared_info.context
-
- @property
- def heading(self):
- if self._shared_info.heading is None:
- self._shared_info.heading = super().heading
- return self._shared_info.heading
diff --git a/datajoint/user_relations.py b/datajoint/user_relations.py
index 7fb5aba1a..1b0352a03 100644
--- a/datajoint/user_relations.py
+++ b/datajoint/user_relations.py
@@ -1,31 +1,60 @@
-from .relations import ClassBoundRelation
+import re
+import abc
+from .relation import Relation
from .autopopulate import AutoPopulate
-from .utils import from_camel_case
+from . import DataJointError
-class Manual(ClassBoundRelation):
+class Manual(Relation):
@property
- @classmethod
- def table_name(cls):
- return from_camel_case(cls.__name__)
+ def table_name(self):
+ return from_camel_case(self.__class__.__name__)
-class Lookup(ClassBoundRelation):
+class Lookup(Relation):
@property
- @classmethod
- def table_name(cls):
- return '#' + from_camel_case(cls.__name__)
+ def table_name(self):
+ return '#' + from_camel_case(self.__class__.__name__)
-class Imported(ClassBoundRelation, AutoPopulate):
+class Imported(Relation, AutoPopulate):
@property
- @classmethod
- def table_name(cls):
- return "_" + from_camel_case(cls.__name__)
+ def table_name(self):
+ return "_" + from_camel_case(self.__class__.__name__)
-class Computed(ClassBoundRelation, AutoPopulate):
+class Computed(Relation, AutoPopulate):
@property
- @classmethod
- def table_name(cls):
- return "__" + from_camel_case(cls.__name__)
\ No newline at end of file
+ def table_name(self):
+ return "__" + from_camel_case(self.__class__.__name__)
+
+
+class Subordinate:
+ """
+ Mix-in to make computed tables subordinate
+ """
+ @property
+ def populate_relation(self):
+ return None
+
+ def _make_tuples(self, key):
+ raise NotImplementedError('Subtables should not be populated directly.')
+
+
+# ---------------- utilities --------------------
+def from_camel_case(s):
+ """
+ Convert names in camel case into underscore (_) separated names
+
+ Example:
+ >>>from_camel_case("TableName")
+ "table_name"
+ """
+ def convert(match):
+ return ('_' if match.groups()[0] else '') + match.group(0).lower()
+
+    if not re.fullmatch(r'[A-Z][a-zA-Z0-9]*', s):
+ raise DataJointError(
+ 'ClassName must be alphanumeric in CamelCase, begin with a capital letter')
+ return re.sub(r'(\B[A-Z])|(\b[A-Z])', convert, s)
+
diff --git a/datajoint/utils.py b/datajoint/utils.py
index ec506472e..f4b0edb57 100644
--- a/datajoint/utils.py
+++ b/datajoint/utils.py
@@ -1,37 +1,3 @@
-import re
-from . import DataJointError
-
-
-def to_camel_case(s):
- """
- Convert names with under score (_) separation
- into camel case names.
-
- Example:
- >>>to_camel_case("table_name")
- "TableName"
- """
- def to_upper(match):
- return match.group(0)[-1].upper()
- return re.sub('(^|[_\W])+[a-zA-Z]', to_upper, s)
-
-
-def from_camel_case(s):
- """
- Convert names in camel case into underscore (_) separated names
-
- Example:
- >>>from_camel_case("TableName")
- "table_name"
- """
- def convert(match):
- return ('_' if match.groups()[0] else '') + match.group(0).lower()
-
- if not re.match(r'[A-Z][a-zA-Z0-9]*', s):
- raise DataJointError(
- 'ClassName must be alphanumeric in CamelCase, begin with a capital letter')
- return re.sub(r'(\B[A-Z])|(\b[A-Z])', convert, s)
-
def user_choice(prompt, choices=("yes", "no"), default=None):
"""
diff --git a/demos/demo1.py b/demos/demo1.py
index e85d6ead3..46ab53fa6 100644
--- a/demos/demo1.py
+++ b/demos/demo1.py
@@ -1,21 +1,15 @@
# -*- coding: utf-8 -*-
-"""
-Created on Tue Aug 26 17:42:52 2014
-
-@author: dimitri
-"""
import datajoint as dj
print("Welcome to the database 'demo1'")
-conn = dj.conn() # connect to database; conn must be defined in module namespace
-conn.bind(module=__name__, dbname='dj_test') # bind this module to the database
-
+schema = dj.schema('dj_test', locals())
-class Subject(dj.Relation):
+@schema
+class Subject(dj.Manual):
definition = """
- demo1.Subject (manual) # Basic subject info
+ # Basic subject info
subject_id : int # internal subject id
---
real_id : varchar(40) # real-world name
@@ -26,11 +20,16 @@ class Subject(dj.Relation):
animal_notes="" : varchar(4096) # strain, genetic manipulations, etc
"""
+s = Subject()
+p = s.primary_key
+
-class Experiment(dj.Relation):
+@schema
+class Experiment(dj.Manual):
definition = """
- demo1.Experiment (manual) # Basic subject info
- -> demo1.Subject
+ # Basic subject info
+
+ -> Subject
experiment : smallint # experiment number for this subject
---
experiment_folder : varchar(255) # folder path
@@ -40,10 +39,12 @@ class Experiment(dj.Relation):
"""
-class Session(dj.Relation):
+@schema
+class Session(dj.Manual):
definition = """
- demo1.Session (manual) # a two-photon imaging session
- -> demo1.Experiment
+ # a two-photon imaging session
+
+ -> Experiment
session_id : tinyint # two-photon session within this experiment
-----------
setup : tinyint # experimental setup
@@ -51,11 +52,12 @@ class Session(dj.Relation):
"""
-class Scan(dj.Relation):
+@schema
+class Scan(dj.Manual):
definition = """
- demo1.Scan (manual) # a two-photon imaging session
- -> demo1.Session
- -> Config
+ # a two-photon imaging session
+
+ -> Session
scan_id : tinyint # two-photon session within this experiment
----
depth : float # depth from surface
@@ -63,16 +65,3 @@ class Scan(dj.Relation):
mwatts: numeric(4,1) # (mW) laser power to brain
"""
-class Config(dj.Relation):
- definition = """
- demo1.Config (manual) # configuration for scanner
- config_id : tinyint # unique id for config setup
- ---
- ->ConfigParam
- """
-
-class ConfigParam(dj.Relation):
- definition = """
- demo1.ConfigParam (lookup) # params for configurations
- param_set_id : tinyint # id for params
- """
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 82abfa7ea..af4d48f13 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,6 @@
numpy
pymysql
+pyparsing
networkx
matplotlib
sphinx_rtd_theme
diff --git a/tests/__init__.py b/tests/__init__.py
index 09e358e98..4d4101116 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -18,6 +18,9 @@
'user': environ.get('DJ_TEST_USER', 'datajoint'),
'passwd': environ.get('DJ_TEST_PASSWORD', 'datajoint')
}
+
+conn = dj.conn(**CONN_INFO)
+
# Prefix for all databases used during testing
PREFIX = environ.get('DJ_TEST_DB_PREFIX', 'dj')
# Bare connection used for verification of query results
@@ -32,7 +35,6 @@ def setup():
def teardown():
cleanup()
-
def cleanup():
"""
Removes all databases with name starting with the prefix.
@@ -52,36 +54,36 @@ def cleanup():
cur.execute('DROP DATABASE `{}`'.format(db))
cur.execute('SET FOREIGN_KEY_CHECKS=1') # set foreign key check back on
cur.execute("COMMIT")
-
-def setup_sample_db():
- """
- Helper method to setup databases with tables to be used
- during the test
- """
- cur = BASE_CONN.cursor()
- cur.execute("CREATE DATABASE IF NOT EXISTS `{}_test1`".format(PREFIX))
- cur.execute("CREATE DATABASE IF NOT EXISTS `{}_test2`".format(PREFIX))
- query1 = """
- CREATE TABLE `{prefix}_test1`.`subjects`
- (
- subject_id SMALLINT COMMENT 'Unique subject ID',
- subject_name VARCHAR(255) COMMENT 'Subject name',
- subject_email VARCHAR(255) COMMENT 'Subject email address',
- PRIMARY KEY (subject_id)
- )
- """.format(prefix=PREFIX)
- cur.execute(query1)
- query2 = """
- CREATE TABLE `{prefix}_test2`.`experimenter`
- (
- experimenter_id SMALLINT COMMENT 'Unique experimenter ID',
- experimenter_name VARCHAR(255) COMMENT 'Experimenter name',
- PRIMARY KEY (experimenter_id)
- )""".format(prefix=PREFIX)
- cur.execute(query2)
-
-
-
-
-
-
+#
+# def setup_sample_db():
+# """
+# Helper method to setup databases with tables to be used
+# during the test
+# """
+# cur = BASE_CONN.cursor()
+# cur.execute("CREATE DATABASE IF NOT EXISTS `{}_test1`".format(PREFIX))
+# cur.execute("CREATE DATABASE IF NOT EXISTS `{}_test2`".format(PREFIX))
+# query1 = """
+# CREATE TABLE `{prefix}_test1`.`subjects`
+# (
+# subject_id SMALLINT COMMENT 'Unique subject ID',
+# subject_name VARCHAR(255) COMMENT 'Subject name',
+# subject_email VARCHAR(255) COMMENT 'Subject email address',
+# PRIMARY KEY (subject_id)
+# )
+# """.format(prefix=PREFIX)
+# cur.execute(query1)
+# query2 = """
+# CREATE TABLE `{prefix}_test2`.`experimenter`
+# (
+# experimenter_id SMALLINT COMMENT 'Unique experimenter ID',
+# experimenter_name VARCHAR(255) COMMENT 'Experimenter name',
+# PRIMARY KEY (experimenter_id)
+# )""".format(prefix=PREFIX)
+# cur.execute(query2)
+#
+#
+#
+#
+#
+#
diff --git a/tests/schemata/__init__.py b/tests/schemata/__init__.py
index 6f391d065..9fa8b9ad1 100644
--- a/tests/schemata/__init__.py
+++ b/tests/schemata/__init__.py
@@ -1 +1 @@
-__author__ = "eywalker"
\ No newline at end of file
+__author__ = "eywalker, fabiansinz"
\ No newline at end of file
diff --git a/tests/schemata/schema1/__init__.py b/tests/schemata/schema1/__init__.py
index 6032e7bd6..cae90cec9 100644
--- a/tests/schemata/schema1/__init__.py
+++ b/tests/schemata/schema1/__init__.py
@@ -1,5 +1,5 @@
-__author__ = 'eywalker'
-import datajoint as dj
-
-print(__name__)
-from .test3 import *
\ No newline at end of file
+# __author__ = 'eywalker'
+# import datajoint as dj
+#
+# print(__name__)
+# from .test3 import *
\ No newline at end of file
diff --git a/tests/schemata/schema1/test1.py b/tests/schemata/schema1/test1.py
deleted file mode 100644
index 4c8df082f..000000000
--- a/tests/schemata/schema1/test1.py
+++ /dev/null
@@ -1,166 +0,0 @@
-"""
-Test 1 Schema definition
-"""
-__author__ = 'eywalker'
-
-import datajoint as dj
-from .. import schema2
-
-
-class Subjects(dj.Relation):
- definition = """
- test1.Subjects (manual) # Basic subject info
-
- subject_id : int # unique subject id
- ---
- real_id : varchar(40) # real-world name
- species = "mouse" : enum('mouse', 'monkey', 'human') # species
- """
-
-# test for shorthand
-class Animals(dj.Relation):
- definition = """
- test1.Animals (manual) # Listing of all info
-
- -> Subjects
- ---
- animal_dob :date # date of birth
- """
-
-
-class Trials(dj.Relation):
- definition = """
- test1.Trials (manual) # info about trials
-
- -> test1.Subjects
- trial_id : int
- ---
- outcome : int # result of experiment
-
- notes="" : varchar(4096) # other comments
- trial_ts=CURRENT_TIMESTAMP : timestamp # automatic
- """
-
-
-
-class SquaredScore(dj.Relation, dj.AutoPopulate):
- definition = """
- test1.SquaredScore (computed) # cumulative outcome of trials
-
- -> test1.Subjects
- -> test1.Trials
- ---
- squared : int # squared result of Trials outcome
- """
-
- @property
- def populate_relation(self):
- return Subjects() * Trials()
-
- def _make_tuples(self, key):
- tmp = (Trials() & key).fetch1()
- tmp2 = SquaredSubtable() & key
-
- self.insert(dict(key, squared=tmp['outcome']**2))
-
- ss = SquaredSubtable()
-
- for i in range(10):
- key['dummy'] = i
- ss.insert(key)
-
-
-class WrongImplementation(dj.Relation, dj.AutoPopulate):
- definition = """
- test1.WrongImplementation (computed) # ignore
-
- -> test1.Subjects
- -> test1.Trials
- ---
- dummy : int # ignore
- """
-
- @property
- def populate_relation(self):
- return {'subject_id':2}
-
- def _make_tuples(self, key):
- pass
-
-
-
-class ErrorGenerator(dj.Relation, dj.AutoPopulate):
- definition = """
- test1.ErrorGenerator (computed) # ignore
-
- -> test1.Subjects
- -> test1.Trials
- ---
- dummy : int # ignore
- """
-
- @property
- def populate_relation(self):
- return Subjects() * Trials()
-
- def _make_tuples(self, key):
- raise Exception("This is for testing")
-
-
-
-
-
-
-class SquaredSubtable(dj.Relation):
- definition = """
- test1.SquaredSubtable (computed) # cumulative outcome of trials
-
- -> test1.SquaredScore
- dummy : int # dummy primary attribute
- ---
- """
-
-
-# test reference to another table in same schema
-class Experiments(dj.Relation):
- definition = """
- test1.Experiments (imported) # Experiment info
- -> test1.Subjects
- exp_id : int # unique id for experiment
- ---
- exp_data_file : varchar(255) # data file
- """
-
-
-# refers to a table in dj_test2 (bound to test2) but without a class
-class Sessions(dj.Relation):
- definition = """
- test1.Sessions (manual) # Experiment sessions
- -> test1.Subjects
- -> test2.Experimenter
- session_id : int # unique session id
- ---
- session_comment : varchar(255) # comment about the session
- """
-
-
-class Match(dj.Relation):
- definition = """
- test1.Match (manual) # Match between subject and color
- -> schema2.Subjects
- ---
- dob : date # date of birth
- """
-
-
-# this tries to reference a table in database directly without ORM
-class TrainingSession(dj.Relation):
- definition = """
- test1.TrainingSession (manual) # training sessions
- -> `dj_test2`.Experimenter
- session_id : int # training session id
- """
-
-
-class Empty(dj.Relation):
- pass
diff --git a/tests/schemata/schema1/test2.py b/tests/schemata/schema1/test2.py
index 563fe6b52..aded2e4fb 100644
--- a/tests/schemata/schema1/test2.py
+++ b/tests/schemata/schema1/test2.py
@@ -1,48 +1,48 @@
-"""
-Test 2 Schema definition
-"""
-__author__ = 'eywalker'
-
-import datajoint as dj
-from . import test1 as alias
-#from ..schema2 import test2 as test1
-
-
-# references to another schema
-class Experiments(dj.Relation):
- definition = """
- test2.Experiments (manual) # Basic subject info
- -> test1.Subjects
- experiment_id : int # unique experiment id
- ---
- real_id : varchar(40) # real-world name
- species = "mouse" : enum('mouse', 'monkey', 'human') # species
- """
-
-
-# references to another schema
-class Conditions(dj.Relation):
- definition = """
- test2.Conditions (manual) # Subject conditions
- -> alias.Subjects
- condition_name : varchar(255) # description of the condition
- """
-
-
-class FoodPreference(dj.Relation):
- definition = """
- test2.FoodPreference (manual) # Food preference of each subject
- -> animals.Subjects
- preferred_food : enum('banana', 'apple', 'oranges')
- """
-
-
-class Session(dj.Relation):
- definition = """
- test2.Session (manual) # Experiment sessions
- -> test1.Subjects
- -> test2.Experimenter
- session_id : int # unique session id
- ---
- session_comment : varchar(255) # comment about the session
- """
\ No newline at end of file
+# """
+# Test 2 Schema definition
+# """
+# __author__ = 'eywalker'
+#
+# import datajoint as dj
+# from . import test1 as alias
+# #from ..schema2 import test2 as test1
+#
+#
+# # references to another schema
+# class Experiments(dj.Relation):
+# definition = """
+# test2.Experiments (manual) # Basic subject info
+# -> test1.Subjects
+# experiment_id : int # unique experiment id
+# ---
+# real_id : varchar(40) # real-world name
+# species = "mouse" : enum('mouse', 'monkey', 'human') # species
+# """
+#
+#
+# # references to another schema
+# class Conditions(dj.Relation):
+# definition = """
+# test2.Conditions (manual) # Subject conditions
+# -> alias.Subjects
+# condition_name : varchar(255) # description of the condition
+# """
+#
+#
+# class FoodPreference(dj.Relation):
+# definition = """
+# test2.FoodPreference (manual) # Food preference of each subject
+# -> animals.Subjects
+# preferred_food : enum('banana', 'apple', 'oranges')
+# """
+#
+#
+# class Session(dj.Relation):
+# definition = """
+# test2.Session (manual) # Experiment sessions
+# -> test1.Subjects
+# -> test2.Experimenter
+# session_id : int # unique session id
+# ---
+# session_comment : varchar(255) # comment about the session
+# """
\ No newline at end of file
diff --git a/tests/schemata/schema1/test3.py b/tests/schemata/schema1/test3.py
index 2004a8736..e00a01afb 100644
--- a/tests/schemata/schema1/test3.py
+++ b/tests/schemata/schema1/test3.py
@@ -1,21 +1,21 @@
-"""
-Test 3 Schema definition - no binding, no conn
-
-To be bound at the package level
-"""
-__author__ = 'eywalker'
-
-import datajoint as dj
-
-
-class Subjects(dj.Relation):
- definition = """
- schema1.Subjects (manual) # Basic subject info
-
- subject_id : int # unique subject id
- dob : date # date of birth
- ---
- real_id : varchar(40) # real-world name
- species = "mouse" : enum('mouse', 'monkey', 'human') # species
- """
-
+# """
+# Test 3 Schema definition - no binding, no conn
+#
+# To be bound at the package level
+# """
+# __author__ = 'eywalker'
+#
+# import datajoint as dj
+#
+#
+# class Subjects(dj.Relation):
+# definition = """
+# schema1.Subjects (manual) # Basic subject info
+#
+# subject_id : int # unique subject id
+# dob : date # date of birth
+# ---
+# real_id : varchar(40) # real-world name
+# species = "mouse" : enum('mouse', 'monkey', 'human') # species
+# """
+#
diff --git a/tests/schemata/schema1/test4.py b/tests/schemata/schema1/test4.py
index a2004affd..9860cb030 100644
--- a/tests/schemata/schema1/test4.py
+++ b/tests/schemata/schema1/test4.py
@@ -1,17 +1,17 @@
-"""
-Test 1 Schema definition - fully bound and has connection object
-"""
-__author__ = 'fabee'
-
-import datajoint as dj
-
-
-class Matrix(dj.Relation):
- definition = """
- test4.Matrix (manual) # Some numpy array
-
- matrix_id : int # unique matrix id
- ---
- data : longblob # data
- comment : varchar(1000) # comment
- """
+# """
+# Test 1 Schema definition - fully bound and has connection object
+# """
+# __author__ = 'fabee'
+#
+# import datajoint as dj
+#
+#
+# class Matrix(dj.Relation):
+# definition = """
+# test4.Matrix (manual) # Some numpy array
+#
+# matrix_id : int # unique matrix id
+# ---
+# data : longblob # data
+# comment : varchar(1000) # comment
+# """
diff --git a/tests/schemata/schema2/__init__.py b/tests/schemata/schema2/__init__.py
index e6b482590..d79e02cc3 100644
--- a/tests/schemata/schema2/__init__.py
+++ b/tests/schemata/schema2/__init__.py
@@ -1,2 +1,2 @@
-__author__ = 'eywalker'
-from .test1 import *
\ No newline at end of file
+# __author__ = 'eywalker'
+# from .test1 import *
\ No newline at end of file
diff --git a/tests/schemata/schema2/test1.py b/tests/schemata/schema2/test1.py
index 83bb3a19e..4005aa670 100644
--- a/tests/schemata/schema2/test1.py
+++ b/tests/schemata/schema2/test1.py
@@ -1,16 +1,16 @@
-"""
-Test 2 Schema definition
-"""
-__author__ = 'eywalker'
-
-import datajoint as dj
-
-
-class Subjects(dj.Relation):
- definition = """
- schema2.Subjects (manual) # Basic subject info
- pop_id : int # unique experiment id
- ---
- real_id : varchar(40) # real-world name
- species = "mouse" : enum('mouse', 'monkey', 'human') # species
- """
\ No newline at end of file
+# """
+# Test 2 Schema definition
+# """
+# __author__ = 'eywalker'
+#
+# import datajoint as dj
+#
+#
+# class Subjects(dj.Relation):
+# definition = """
+# schema2.Subjects (manual) # Basic subject info
+# pop_id : int # unique experiment id
+# ---
+# real_id : varchar(40) # real-world name
+# species = "mouse" : enum('mouse', 'monkey', 'human') # species
+# """
\ No newline at end of file
diff --git a/tests/schemata/test1.py b/tests/schemata/test1.py
new file mode 100644
index 000000000..5b4ac723a
--- /dev/null
+++ b/tests/schemata/test1.py
@@ -0,0 +1,169 @@
+"""
+Test 1 Schema definition
+"""
+__author__ = 'eywalker'
+
+import datajoint as dj
+# from .. import schema2
+from .. import PREFIX
+
+testschema = dj.schema(PREFIX + '_test1', locals())
+
+@testschema
+class Subjects(dj.Manual):
+ definition = """
+ # Basic subject info
+
+ subject_id : int # unique subject id
+ ---
+ real_id : varchar(40) # real-world name
+ species = "mouse" : enum('mouse', 'monkey', 'human') # species
+ """
+
+# # test for shorthand
+# class Animals(dj.Relation):
+# definition = """
+# test1.Animals (manual) # Listing of all info
+#
+# -> Subjects
+# ---
+# animal_dob :date # date of birth
+# """
+#
+#
+# class Trials(dj.Relation):
+# definition = """
+# test1.Trials (manual) # info about trials
+#
+# -> test1.Subjects
+# trial_id : int
+# ---
+# outcome : int # result of experiment
+#
+# notes="" : varchar(4096) # other comments
+# trial_ts=CURRENT_TIMESTAMP : timestamp # automatic
+# """
+#
+#
+#
+# class SquaredScore(dj.Relation, dj.AutoPopulate):
+# definition = """
+# test1.SquaredScore (computed) # cumulative outcome of trials
+#
+# -> test1.Subjects
+# -> test1.Trials
+# ---
+# squared : int # squared result of Trials outcome
+# """
+#
+# @property
+# def populate_relation(self):
+# return Subjects() * Trials()
+#
+# def _make_tuples(self, key):
+# tmp = (Trials() & key).fetch1()
+# tmp2 = SquaredSubtable() & key
+#
+# self.insert(dict(key, squared=tmp['outcome']**2))
+#
+# ss = SquaredSubtable()
+#
+# for i in range(10):
+# key['dummy'] = i
+# ss.insert(key)
+#
+#
+# class WrongImplementation(dj.Relation, dj.AutoPopulate):
+# definition = """
+# test1.WrongImplementation (computed) # ignore
+#
+# -> test1.Subjects
+# -> test1.Trials
+# ---
+# dummy : int # ignore
+# """
+#
+# @property
+# def populate_relation(self):
+# return {'subject_id':2}
+#
+# def _make_tuples(self, key):
+# pass
+#
+#
+#
+# class ErrorGenerator(dj.Relation, dj.AutoPopulate):
+# definition = """
+# test1.ErrorGenerator (computed) # ignore
+#
+# -> test1.Subjects
+# -> test1.Trials
+# ---
+# dummy : int # ignore
+# """
+#
+# @property
+# def populate_relation(self):
+# return Subjects() * Trials()
+#
+# def _make_tuples(self, key):
+# raise Exception("This is for testing")
+#
+#
+#
+#
+#
+#
+# class SquaredSubtable(dj.Relation):
+# definition = """
+# test1.SquaredSubtable (computed) # cumulative outcome of trials
+#
+# -> test1.SquaredScore
+# dummy : int # dummy primary attribute
+# ---
+# """
+#
+#
+# # test reference to another table in same schema
+# class Experiments(dj.Relation):
+# definition = """
+# test1.Experiments (imported) # Experiment info
+# -> test1.Subjects
+# exp_id : int # unique id for experiment
+# ---
+# exp_data_file : varchar(255) # data file
+# """
+#
+#
+# # refers to a table in dj_test2 (bound to test2) but without a class
+# class Sessions(dj.Relation):
+# definition = """
+# test1.Sessions (manual) # Experiment sessions
+# -> test1.Subjects
+# -> test2.Experimenter
+# session_id : int # unique session id
+# ---
+# session_comment : varchar(255) # comment about the session
+# """
+#
+#
+# class Match(dj.Relation):
+# definition = """
+# test1.Match (manual) # Match between subject and color
+# -> schema2.Subjects
+# ---
+# dob : date # date of birth
+# """
+#
+#
+# # this tries to reference a table in database directly without ORM
+# class TrainingSession(dj.Relation):
+# definition = """
+# test1.TrainingSession (manual) # training sessions
+# -> `dj_test2`.Experimenter
+# session_id : int # training session id
+# """
+#
+#
+# class Empty(dj.Relation):
+# pass
diff --git a/tests/test_connection.py b/tests/test_connection.py
index 1fb581468..29fee4f64 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -1,257 +1,257 @@
-"""
-Collection of test cases to test connection module.
-"""
-from .schemata import schema1
-from .schemata.schema1 import test1
-import numpy as np
-
-__author__ = 'eywalker, fabee'
-from . import (CONN_INFO, PREFIX, BASE_CONN, cleanup)
-from nose.tools import assert_true, assert_raises, assert_equal, raises
-import datajoint as dj
-from datajoint.utils import DataJointError
-
-
-def setup():
- cleanup()
-
-
-def test_dj_conn():
- """
- Should be able to establish a connection
- """
- c = dj.conn(**CONN_INFO)
- assert c.is_connected
-
-
-def test_persistent_dj_conn():
- """
- conn() method should provide persistent connection
- across calls.
- """
- c1 = dj.conn(**CONN_INFO)
- c2 = dj.conn()
- assert_true(c1 is c2)
-
-
-def test_dj_conn_reset():
- """
- Passing in reset=True should allow for new persistent
- connection to be created.
- """
- c1 = dj.conn(**CONN_INFO)
- c2 = dj.conn(reset=True, **CONN_INFO)
- assert_true(c1 is not c2)
-
-
-
-def setup_sample_db():
- """
- Helper method to setup databases with tables to be used
- during the test
- """
- cur = BASE_CONN.cursor()
- cur.execute("CREATE DATABASE `{}_test1`".format(PREFIX))
- cur.execute("CREATE DATABASE `{}_test2`".format(PREFIX))
- query1 = """
- CREATE TABLE `{prefix}_test1`.`subjects`
- (
- subject_id SMALLINT COMMENT 'Unique subject ID',
- subject_name VARCHAR(255) COMMENT 'Subject name',
- subject_email VARCHAR(255) COMMENT 'Subject email address',
- PRIMARY KEY (subject_id)
- )
- """.format(prefix=PREFIX)
- cur.execute(query1)
- # query2 = """
- # CREATE TABLE `{prefix}_test2`.`experiments`
- # (
- # experiment_id SMALLINT COMMENT 'Unique experiment ID',
- # experiment_name VARCHAR(255) COMMENT 'Experiment name',
- # subject_id SMALLINT,
- # CONSTRAINT FOREIGN KEY (`subject_id`) REFERENCES `dj_test1`.`subjects` (`subject_id`) ON UPDATE CASCADE ON DELETE RESTRICT,
- # PRIMARY KEY (subject_id, experiment_id)
- # )""".format(prefix=PREFIX)
- # cur.execute(query2)
-
-
-class TestConnectionWithoutBindings(object):
- """
- Test methods from Connection that does not
- depend on presence of module to database bindings.
- This includes tests for `bind` method itself.
- """
- def setup(self):
- self.conn = dj.Connection(**CONN_INFO)
- test1.__dict__.pop('conn', None)
- schema1.__dict__.pop('conn', None)
- setup_sample_db()
-
- def teardown(self):
- cleanup()
-
- def check_binding(self, db_name, module):
- """
- Helper method to check if the specified database-module pairing exists
- """
- assert_equal(self.conn.db_to_mod[db_name], module)
- assert_equal(self.conn.mod_to_db[module], db_name)
-
- def test_bind_to_existing_database(self):
- """
- Should be able to bind a module to an existing database
- """
- db_name = PREFIX + '_test1'
- module = test1.__name__
- self.conn.bind(module, db_name)
- self.check_binding(db_name, module)
-
- def test_bind_at_package_level(self):
- db_name = PREFIX + '_test1'
- package = schema1.__name__
- self.conn.bind(package, db_name)
- self.check_binding(db_name, package)
-
- def test_bind_to_non_existing_database(self):
- """
- Should be able to bind a module to a non-existing database by creating target
- """
- db_name = PREFIX + '_test3'
- module = test1.__name__
- cur = BASE_CONN.cursor()
-
- # Ensure target database doesn't exist
- if cur.execute("SHOW DATABASES LIKE '{}'".format(db_name)):
- cur.execute("DROP DATABASE IF EXISTS `{}`".format(db_name))
- # Bind module to non-existing database
- self.conn.bind(module, db_name)
- # Check that target database was created
- assert_equal(cur.execute("SHOW DATABASES LIKE '{}'".format(db_name)), 1)
- self.check_binding(db_name, module)
- # Remove the target database
- cur.execute("DROP DATABASE IF EXISTS `{}`".format(db_name))
-
- def test_cannot_bind_to_multiple_databases(self):
- """
- Bind will fail when db_name is a pattern that
- matches multiple databases
- """
- db_name = PREFIX + "_test%%"
- module = test1.__name__
- with assert_raises(DataJointError):
- self.conn.bind(module, db_name)
-
- def test_basic_sql_query(self):
- """
- Test execution of basic SQL query using connection
- object.
- """
- cur = self.conn.query('SHOW DATABASES')
- results1 = cur.fetchall()
- cur2 = BASE_CONN.cursor()
- cur2.execute('SHOW DATABASES')
- results2 = cur2.fetchall()
- assert_equal(results1, results2)
-
- def test_transaction_commit(self):
- """
- Test transaction commit
- """
- table_name = PREFIX + '_test1.subjects'
- self.conn.start_transaction()
- self.conn.query("INSERT INTO {table} VALUES (0, 'dj_user', 'dj_user@example.com')".format(table=table_name))
- cur = BASE_CONN.cursor()
- assert_equal(cur.execute("SELECT * FROM {}".format(table_name)), 0)
- self.conn.commit_transaction()
- assert_equal(cur.execute("SELECT * FROM {}".format(table_name)), 1)
-
- def test_transaction_rollback(self):
- """
- Test transaction rollback
- """
- table_name = PREFIX + '_test1.subjects'
- self.conn.start_transaction()
- self.conn.query("INSERT INTO {table} VALUES (0, 'dj_user', 'dj_user@example.com')".format(table=table_name))
- cur = BASE_CONN.cursor()
- assert_equal(cur.execute("SELECT * FROM {}".format(table_name)), 0)
- self.conn.cancel_transaction()
- assert_equal(cur.execute("SELECT * FROM {}".format(table_name)), 0)
-
-# class TestContextManager(object):
-# def __init__(self):
-# self.relvar = None
-# self.setup()
+# """
+# Collection of test cases to test connection module.
+# """
+# from .schemata import schema1
+# from .schemata.schema1 import test1
+# import numpy as np
#
+# __author__ = 'eywalker, fabee'
+# from . import (CONN_INFO, PREFIX, BASE_CONN, cleanup)
+# from nose.tools import assert_true, assert_raises, assert_equal, raises
+# import datajoint as dj
+# from datajoint.utils import DataJointError
+#
+#
+# def setup():
+# cleanup()
+#
+#
+# def test_dj_conn():
# """
-# Test cases for FreeRelation objects
+# Should be able to establish a connection
# """
+# c = dj.conn(**CONN_INFO)
+# assert c.is_connected
#
-# def setup(self):
-# """
-# Create a connection object and prepare test modules
-# as follows:
-# test1 - has conn and bounded
-# """
-# cleanup() # drop all databases with PREFIX
-# test1.__dict__.pop('conn', None)
#
-# self.conn = dj.Connection(**CONN_INFO)
-# test1.conn = self.conn
-# self.conn.bind(test1.__name__, PREFIX + '_test1')
-# self.relvar = test1.Subjects()
+# def test_persistent_dj_conn():
+# """
+# conn() method should provide persistent connection
+# across calls.
+# """
+# c1 = dj.conn(**CONN_INFO)
+# c2 = dj.conn()
+# assert_true(c1 is c2)
#
-# def teardown(self):
-# cleanup()
#
-# # def test_active(self):
-# # with self.conn.transaction() as tr:
-# # assert_true(tr.is_active, "Transaction is not active")
-#
-# # def test_rollback(self):
-# #
-# # tmp = np.array([(1,'Peter','mouse'),(2, 'Klara', 'monkey')],
-# # dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
-# #
-# # self.relvar.insert(tmp[0])
-# # try:
-# # with self.conn.transaction():
-# # self.relvar.insert(tmp[1])
-# # raise DataJointError("Just to test")
-# # except DataJointError as e:
-# # pass
-# #
-# # testt2 = (self.relvar & 'subject_id = 2').fetch()
-# # assert_equal(len(testt2), 0, "Length is not 0. Expected because rollback should have happened.")
-#
-# # def test_cancel(self):
-# # """Tests cancelling a transaction"""
-# # tmp = np.array([(1,'Peter','mouse'),(2, 'Klara', 'monkey')],
-# # dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
-# #
-# # self.relvar.insert(tmp[0])
-# # with self.conn.transaction() as transaction:
-# # self.relvar.insert(tmp[1])
-# # transaction.cancel()
-# #
-# # testt2 = (self.relvar & 'subject_id = 2').fetch()
-# # assert_equal(len(testt2), 0, "Length is not 0. Expected because rollback should have happened.")
-
-
-
-# class TestConnectionWithBindings(object):
+# def test_dj_conn_reset():
+# """
+# Passing in reset=True should allow for new persistent
+# connection to be created.
+# """
+# c1 = dj.conn(**CONN_INFO)
+# c2 = dj.conn(reset=True, **CONN_INFO)
+# assert_true(c1 is not c2)
+#
+#
+#
+# def setup_sample_db():
+# """
+# Helper method to setup databases with tables to be used
+# during the test
+# """
+# cur = BASE_CONN.cursor()
+# cur.execute("CREATE DATABASE `{}_test1`".format(PREFIX))
+# cur.execute("CREATE DATABASE `{}_test2`".format(PREFIX))
+# query1 = """
+# CREATE TABLE `{prefix}_test1`.`subjects`
+# (
+# subject_id SMALLINT COMMENT 'Unique subject ID',
+# subject_name VARCHAR(255) COMMENT 'Subject name',
+# subject_email VARCHAR(255) COMMENT 'Subject email address',
+# PRIMARY KEY (subject_id)
+# )
+# """.format(prefix=PREFIX)
+# cur.execute(query1)
+# # query2 = """
+# # CREATE TABLE `{prefix}_test2`.`experiments`
+# # (
+# # experiment_id SMALLINT COMMENT 'Unique experiment ID',
+# # experiment_name VARCHAR(255) COMMENT 'Experiment name',
+# # subject_id SMALLINT,
+# # CONSTRAINT FOREIGN KEY (`subject_id`) REFERENCES `dj_test1`.`subjects` (`subject_id`) ON UPDATE CASCADE ON DELETE RESTRICT,
+# # PRIMARY KEY (subject_id, experiment_id)
+# # )""".format(prefix=PREFIX)
+# # cur.execute(query2)
+#
+#
+# class TestConnectionWithoutBindings(object):
# """
-# Tests heading and dependency loadings
+# Test methods from Connection that do not
+# depend on presence of module to database bindings.
+# This includes tests for `bind` method itself.
# """
# def setup(self):
# self.conn = dj.Connection(**CONN_INFO)
-# cur.execute(query)
-
-
-
-
-
-
-
-
-
-
+# test1.__dict__.pop('conn', None)
+# schema1.__dict__.pop('conn', None)
+# setup_sample_db()
+#
+# def teardown(self):
+# cleanup()
+#
+# def check_binding(self, db_name, module):
+# """
+# Helper method to check if the specified database-module pairing exists
+# """
+# assert_equal(self.conn.db_to_mod[db_name], module)
+# assert_equal(self.conn.mod_to_db[module], db_name)
+#
+# def test_bind_to_existing_database(self):
+# """
+# Should be able to bind a module to an existing database
+# """
+# db_name = PREFIX + '_test1'
+# module = test1.__name__
+# self.conn.bind(module, db_name)
+# self.check_binding(db_name, module)
+#
+# def test_bind_at_package_level(self):
+# db_name = PREFIX + '_test1'
+# package = schema1.__name__
+# self.conn.bind(package, db_name)
+# self.check_binding(db_name, package)
+#
+# def test_bind_to_non_existing_database(self):
+# """
+# Should be able to bind a module to a non-existing database by creating target
+# """
+# db_name = PREFIX + '_test3'
+# module = test1.__name__
+# cur = BASE_CONN.cursor()
+#
+# # Ensure target database doesn't exist
+# if cur.execute("SHOW DATABASES LIKE '{}'".format(db_name)):
+# cur.execute("DROP DATABASE IF EXISTS `{}`".format(db_name))
+# # Bind module to non-existing database
+# self.conn.bind(module, db_name)
+# # Check that target database was created
+# assert_equal(cur.execute("SHOW DATABASES LIKE '{}'".format(db_name)), 1)
+# self.check_binding(db_name, module)
+# # Remove the target database
+# cur.execute("DROP DATABASE IF EXISTS `{}`".format(db_name))
+#
+# def test_cannot_bind_to_multiple_databases(self):
+# """
+# Bind will fail when db_name is a pattern that
+# matches multiple databases
+# """
+# db_name = PREFIX + "_test%%"
+# module = test1.__name__
+# with assert_raises(DataJointError):
+# self.conn.bind(module, db_name)
+#
+# def test_basic_sql_query(self):
+# """
+# Test execution of basic SQL query using connection
+# object.
+# """
+# cur = self.conn.query('SHOW DATABASES')
+# results1 = cur.fetchall()
+# cur2 = BASE_CONN.cursor()
+# cur2.execute('SHOW DATABASES')
+# results2 = cur2.fetchall()
+# assert_equal(results1, results2)
+#
+# def test_transaction_commit(self):
+# """
+# Test transaction commit
+# """
+# table_name = PREFIX + '_test1.subjects'
+# self.conn.start_transaction()
+# self.conn.query("INSERT INTO {table} VALUES (0, 'dj_user', 'dj_user@example.com')".format(table=table_name))
+# cur = BASE_CONN.cursor()
+# assert_equal(cur.execute("SELECT * FROM {}".format(table_name)), 0)
+# self.conn.commit_transaction()
+# assert_equal(cur.execute("SELECT * FROM {}".format(table_name)), 1)
+#
+# def test_transaction_rollback(self):
+# """
+# Test transaction rollback
+# """
+# table_name = PREFIX + '_test1.subjects'
+# self.conn.start_transaction()
+# self.conn.query("INSERT INTO {table} VALUES (0, 'dj_user', 'dj_user@example.com')".format(table=table_name))
+# cur = BASE_CONN.cursor()
+# assert_equal(cur.execute("SELECT * FROM {}".format(table_name)), 0)
+# self.conn.cancel_transaction()
+# assert_equal(cur.execute("SELECT * FROM {}".format(table_name)), 0)
+#
+# # class TestContextManager(object):
+# # def __init__(self):
+# # self.relvar = None
+# # self.setup()
+# #
+# # """
+# # Test cases for FreeRelation objects
+# # """
+# #
+# # def setup(self):
+# # """
+# # Create a connection object and prepare test modules
+# # as follows:
+# # test1 - has conn and is bound
+# # """
+# # cleanup() # drop all databases with PREFIX
+# # test1.__dict__.pop('conn', None)
+# #
+# # self.conn = dj.Connection(**CONN_INFO)
+# # test1.conn = self.conn
+# # self.conn.bind(test1.__name__, PREFIX + '_test1')
+# # self.relvar = test1.Subjects()
+# #
+# # def teardown(self):
+# # cleanup()
+# #
+# # # def test_active(self):
+# # # with self.conn.transaction() as tr:
+# # # assert_true(tr.is_active, "Transaction is not active")
+# #
+# # # def test_rollback(self):
+# # #
+# # # tmp = np.array([(1,'Peter','mouse'),(2, 'Klara', 'monkey')],
+# # # dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
+# # #
+# # # self.relvar.insert(tmp[0])
+# # # try:
+# # # with self.conn.transaction():
+# # # self.relvar.insert(tmp[1])
+# # # raise DataJointError("Just to test")
+# # # except DataJointError as e:
+# # # pass
+# # #
+# # # testt2 = (self.relvar & 'subject_id = 2').fetch()
+# # # assert_equal(len(testt2), 0, "Length is not 0. Expected because rollback should have happened.")
+# #
+# # # def test_cancel(self):
+# # # """Tests cancelling a transaction"""
+# # # tmp = np.array([(1,'Peter','mouse'),(2, 'Klara', 'monkey')],
+# # # dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
+# # #
+# # # self.relvar.insert(tmp[0])
+# # # with self.conn.transaction() as transaction:
+# # # self.relvar.insert(tmp[1])
+# # # transaction.cancel()
+# # #
+# # # testt2 = (self.relvar & 'subject_id = 2').fetch()
+# # # assert_equal(len(testt2), 0, "Length is not 0. Expected because rollback should have happened.")
+#
+#
+#
+# # class TestConnectionWithBindings(object):
+# # """
+# # Tests heading and dependency loadings
+# # """
+# # def setup(self):
+# # self.conn = dj.Connection(**CONN_INFO)
+# # cur.execute(query)
+#
+#
+#
+#
+#
+#
+#
+#
+#
+#
diff --git a/tests/test_free_relation.py b/tests/test_free_relation.py
index e4ebaa872..285bf7487 100644
--- a/tests/test_free_relation.py
+++ b/tests/test_free_relation.py
@@ -1,205 +1,205 @@
-"""
-Collection of test cases for base module. Tests functionalities such as
-creating tables using docstring table declarations
-"""
-from .schemata import schema1, schema2
-from .schemata.schema1 import test1, test2, test3
-
-
-__author__ = 'eywalker'
-
-from . import BASE_CONN, CONN_INFO, PREFIX, cleanup, setup_sample_db
-from datajoint.connection import Connection
-from nose.tools import assert_raises, assert_equal, assert_regexp_matches, assert_false, assert_true, raises
-from datajoint import DataJointError
-
-
-def setup():
- """
- Setup connections and bindings
- """
- pass
-
-
-class TestRelationInstantiations(object):
- """
- Test cases for instantiating Relation objects
- """
- def __init__(self):
- self.conn = None
-
- def setup(self):
- """
- Create a connection object and prepare test modules
- as follows:
- test1 - has conn and bounded
- """
- self.conn = Connection(**CONN_INFO)
- cleanup() # drop all databases with PREFIX
- #test1.conn = self.conn
- #self.conn.bind(test1.__name__, PREFIX+'_test1')
-
- #test2.conn = self.conn
-
- #test3.__dict__.pop('conn', None) # make sure conn is not defined in test3
- test1.__dict__.pop('conn', None)
- schema1.__dict__.pop('conn', None) # make sure conn is not defined at schema level
-
-
- def teardown(self):
- cleanup()
-
-
- def test_instantiation_from_unbound_module_should_fail(self):
- """
- Attempting to instantiate a Relation derivative from a module with
- connection defined but not bound to a database should raise error
- """
- test1.conn = self.conn
- with assert_raises(DataJointError) as e:
- test1.Subjects()
- assert_regexp_matches(e.exception.args[0], r".*not bound.*")
-
- def test_instantiation_from_module_without_conn_should_fail(self):
- """
- Attempting to instantiate a Relation derivative from a module that lacks
- `conn` object should raise error
- """
- with assert_raises(DataJointError) as e:
- test1.Subjects()
- assert_regexp_matches(e.exception.args[0], r".*define.*conn.*")
-
- def test_instantiation_of_base_derivatives(self):
- """
- Test instantiation and initialization of objects derived from
- Relation class
- """
- test1.conn = self.conn
- self.conn.bind(test1.__name__, PREFIX + '_test1')
- s = test1.Subjects()
- assert_equal(s.dbname, PREFIX + '_test1')
- assert_equal(s.conn, self.conn)
- assert_equal(s.definition, test1.Subjects.definition)
-
- def test_packagelevel_binding(self):
- schema2.conn = self.conn
- self.conn.bind(schema2.__name__, PREFIX + '_test1')
- s = schema2.test1.Subjects()
-
-
-class TestRelationDeclaration(object):
- """
- Test declaration (creation of table) from
- definition in Relation under various circumstances
- """
-
- def setup(self):
- cleanup()
-
- self.conn = Connection(**CONN_INFO)
- test1.conn = self.conn
- self.conn.bind(test1.__name__, PREFIX + '_test1')
- test2.conn = self.conn
- self.conn.bind(test2.__name__, PREFIX + '_test2')
-
- def test_is_declared(self):
- """
- The table should not be created immediately after instantiation,
- but should be created when declare method is called
- :return:
- """
- s = test1.Subjects()
- assert_false(s.is_declared)
- s.declare()
- assert_true(s.is_declared)
-
- def test_calling_heading_should_trigger_declaration(self):
- s = test1.Subjects()
- assert_false(s.is_declared)
- a = s.heading
- assert_true(s.is_declared)
-
- def test_foreign_key_ref_in_same_schema(self):
- s = test1.Experiments()
- assert_true('subject_id' in s.heading.primary_key)
-
- def test_foreign_key_ref_in_another_schema(self):
- s = test2.Experiments()
- assert_true('subject_id' in s.heading.primary_key)
-
- def test_aliased_module_name_should_resolve(self):
- """
- Module names that were aliased in the definition should
- be properly resolved.
- """
- s = test2.Conditions()
- assert_true('subject_id' in s.heading.primary_key)
-
- def test_reference_to_unknown_module_in_definition_should_fail(self):
- """
- Module names in table definition that is not aliased via import
- results in error
- """
- s = test2.FoodPreference()
- with assert_raises(DataJointError) as e:
- s.declare()
-
-
-class TestRelationWithExistingTables(object):
- """
- Test base derivatives behaviors when some of the tables
- already exists in the database
- """
- def setup(self):
- cleanup()
- self.conn = Connection(**CONN_INFO)
- setup_sample_db()
- test1.conn = self.conn
- self.conn.bind(test1.__name__, PREFIX + '_test1')
- test2.conn = self.conn
- self.conn.bind(test2.__name__, PREFIX + '_test2')
- self.conn.load_headings(force=True)
-
- schema2.conn = self.conn
- self.conn.bind(schema2.__name__, PREFIX + '_package')
-
- def teardown(selfself):
- schema1.__dict__.pop('conn', None)
- cleanup()
-
- def test_detection_of_existing_table(self):
- """
- The Relation instance should be able to detect if the
- corresponding table already exists in the database
- """
- s = test1.Subjects()
- assert_true(s.is_declared)
-
- def test_definition_referring_to_existing_table_without_class(self):
- s1 = test1.Sessions()
- assert_true('experimenter_id' in s1.primary_key)
-
- s2 = test2.Session()
- assert_true('experimenter_id' in s2.primary_key)
-
- def test_reference_to_package_level_table(self):
- s = test1.Match()
- s.declare()
- assert_true('pop_id' in s.primary_key)
-
- def test_direct_reference_to_existing_table_should_fail(self):
- """
- When deriving from Relation, definition should not contain direct reference
- to a database name
- """
- s = test1.TrainingSession()
- with assert_raises(DataJointError):
- s.declare()
-
-@raises(TypeError)
-def test_instantiation_of_base_derivative_without_definition_should_fail():
- test1.Empty()
-
-
-
-
+# """
+# Collection of test cases for base module. Tests functionalities such as
+# creating tables using docstring table declarations
+# """
+# from .schemata import schema1, schema2
+# from .schemata.schema1 import test1, test2, test3
+#
+#
+# __author__ = 'eywalker'
+#
+# from . import BASE_CONN, CONN_INFO, PREFIX, cleanup, setup_sample_db
+# from datajoint.connection import Connection
+# from nose.tools import assert_raises, assert_equal, assert_regexp_matches, assert_false, assert_true, raises
+# from datajoint import DataJointError
+#
+#
+# def setup():
+# """
+# Setup connections and bindings
+# """
+# pass
+#
+#
+# class TestRelationInstantiations(object):
+# """
+# Test cases for instantiating Relation objects
+# """
+# def __init__(self):
+# self.conn = None
+#
+# def setup(self):
+# """
+# Create a connection object and prepare test modules
+# as follows:
+# test1 - has conn and is bound
+# """
+# self.conn = Connection(**CONN_INFO)
+# cleanup() # drop all databases with PREFIX
+# #test1.conn = self.conn
+# #self.conn.bind(test1.__name__, PREFIX+'_test1')
+#
+# #test2.conn = self.conn
+#
+# #test3.__dict__.pop('conn', None) # make sure conn is not defined in test3
+# test1.__dict__.pop('conn', None)
+# schema1.__dict__.pop('conn', None) # make sure conn is not defined at schema level
+#
+#
+# def teardown(self):
+# cleanup()
+#
+#
+# def test_instantiation_from_unbound_module_should_fail(self):
+# """
+# Attempting to instantiate a Relation derivative from a module with
+# connection defined but not bound to a database should raise error
+# """
+# test1.conn = self.conn
+# with assert_raises(DataJointError) as e:
+# test1.Subjects()
+# assert_regexp_matches(e.exception.args[0], r".*not bound.*")
+#
+# def test_instantiation_from_module_without_conn_should_fail(self):
+# """
+# Attempting to instantiate a Relation derivative from a module that lacks
+# `conn` object should raise error
+# """
+# with assert_raises(DataJointError) as e:
+# test1.Subjects()
+# assert_regexp_matches(e.exception.args[0], r".*define.*conn.*")
+#
+# def test_instantiation_of_base_derivatives(self):
+# """
+# Test instantiation and initialization of objects derived from
+# Relation class
+# """
+# test1.conn = self.conn
+# self.conn.bind(test1.__name__, PREFIX + '_test1')
+# s = test1.Subjects()
+# assert_equal(s.dbname, PREFIX + '_test1')
+# assert_equal(s.conn, self.conn)
+# assert_equal(s.definition, test1.Subjects.definition)
+#
+# def test_packagelevel_binding(self):
+# schema2.conn = self.conn
+# self.conn.bind(schema2.__name__, PREFIX + '_test1')
+# s = schema2.test1.Subjects()
+#
+#
+# class TestRelationDeclaration(object):
+# """
+# Test declaration (creation of table) from
+# definition in Relation under various circumstances
+# """
+#
+# def setup(self):
+# cleanup()
+#
+# self.conn = Connection(**CONN_INFO)
+# test1.conn = self.conn
+# self.conn.bind(test1.__name__, PREFIX + '_test1')
+# test2.conn = self.conn
+# self.conn.bind(test2.__name__, PREFIX + '_test2')
+#
+# def test_is_declared(self):
+# """
+# The table should not be created immediately after instantiation,
+# but should be created when declare method is called
+# :return:
+# """
+# s = test1.Subjects()
+# assert_false(s.is_declared)
+# s.declare()
+# assert_true(s.is_declared)
+#
+# def test_calling_heading_should_trigger_declaration(self):
+# s = test1.Subjects()
+# assert_false(s.is_declared)
+# a = s.heading
+# assert_true(s.is_declared)
+#
+# def test_foreign_key_ref_in_same_schema(self):
+# s = test1.Experiments()
+# assert_true('subject_id' in s.heading.primary_key)
+#
+# def test_foreign_key_ref_in_another_schema(self):
+# s = test2.Experiments()
+# assert_true('subject_id' in s.heading.primary_key)
+#
+# def test_aliased_module_name_should_resolve(self):
+# """
+# Module names that were aliased in the definition should
+# be properly resolved.
+# """
+# s = test2.Conditions()
+# assert_true('subject_id' in s.heading.primary_key)
+#
+# def test_reference_to_unknown_module_in_definition_should_fail(self):
+# """
+# Module names in table definition that is not aliased via import
+# results in error
+# """
+# s = test2.FoodPreference()
+# with assert_raises(DataJointError) as e:
+# s.declare()
+#
+#
+# class TestRelationWithExistingTables(object):
+# """
+# Test base derivatives behaviors when some of the tables
+# already exists in the database
+# """
+# def setup(self):
+# cleanup()
+# self.conn = Connection(**CONN_INFO)
+# setup_sample_db()
+# test1.conn = self.conn
+# self.conn.bind(test1.__name__, PREFIX + '_test1')
+# test2.conn = self.conn
+# self.conn.bind(test2.__name__, PREFIX + '_test2')
+# self.conn.load_headings(force=True)
+#
+# schema2.conn = self.conn
+# self.conn.bind(schema2.__name__, PREFIX + '_package')
+#
+# def teardown(self):
+# schema1.__dict__.pop('conn', None)
+# cleanup()
+#
+# def test_detection_of_existing_table(self):
+# """
+# The Relation instance should be able to detect if the
+# corresponding table already exists in the database
+# """
+# s = test1.Subjects()
+# assert_true(s.is_declared)
+#
+# def test_definition_referring_to_existing_table_without_class(self):
+# s1 = test1.Sessions()
+# assert_true('experimenter_id' in s1.primary_key)
+#
+# s2 = test2.Session()
+# assert_true('experimenter_id' in s2.primary_key)
+#
+# def test_reference_to_package_level_table(self):
+# s = test1.Match()
+# s.declare()
+# assert_true('pop_id' in s.primary_key)
+#
+# def test_direct_reference_to_existing_table_should_fail(self):
+# """
+# When deriving from Relation, definition should not contain direct reference
+# to a database name
+# """
+# s = test1.TrainingSession()
+# with assert_raises(DataJointError):
+# s.declare()
+#
+# @raises(TypeError)
+# def test_instantiation_of_base_derivative_without_definition_should_fail():
+# test1.Empty()
+#
+#
+#
+#
diff --git a/tests/test_relation.py b/tests/test_relation.py
index 3facc6721..76f87f707 100644
--- a/tests/test_relation.py
+++ b/tests/test_relation.py
@@ -1,490 +1,496 @@
-import random
-import string
-
-__author__ = 'fabee'
-
-from .schemata.schema1 import test1, test4
-
-from . import BASE_CONN, CONN_INFO, PREFIX, cleanup
-from datajoint.connection import Connection
-from nose.tools import assert_raises, assert_equal, assert_regexp_matches, assert_false, assert_true, assert_list_equal,\
- assert_tuple_equal, assert_dict_equal, raises
-from datajoint import DataJointError, TransactionError, AutoPopulate, Relation
-import numpy as np
-from numpy.testing import assert_array_equal
-from datajoint.abstract_relation import FreeRelation
-import numpy as np
-
-
-def trial_faker(n=10):
- def iter():
- for s in [1, 2]:
- for i in range(n):
- yield dict(trial_id=i, subject_id=s, outcome=int(np.random.randint(10)), notes= 'no comment')
- return iter()
-
-
-def setup():
- """
- Setup connections and bindings
- """
- pass
-
-
-class TestTableObject(object):
- def __init__(self):
- self.subjects = None
- self.setup()
-
- """
- Test cases for FreeRelation objects
- """
-
- def setup(self):
- """
- Create a connection object and prepare test modules
- as follows:
- test1 - has conn and bounded
- """
- cleanup() # drop all databases with PREFIX
- test1.__dict__.pop('conn', None)
- test4.__dict__.pop('conn', None) # make sure conn is not defined at schema level
-
- self.conn = Connection(**CONN_INFO)
- test1.conn = self.conn
- test4.conn = self.conn
- self.conn.bind(test1.__name__, PREFIX + '_test1')
- self.conn.bind(test4.__name__, PREFIX + '_test4')
- self.subjects = test1.Subjects()
- self.animals = test1.Animals()
- self.relvar_blob = test4.Matrix()
- self.trials = test1.Trials()
-
- def teardown(self):
- cleanup()
-
- def test_compound_restriction(self):
- s = self.subjects
- t = self.trials
-
- s.insert(dict(subject_id=1, real_id='M'))
- s.insert(dict(subject_id=2, real_id='F'))
- t.iter_insert(trial_faker(20))
-
- tM = t & (s & "real_id = 'M'")
- t1 = t & "subject_id = 1"
-
- assert_equal(len(tM), len(t1), "Results of compound request does not have same length")
-
- for t1_item, tM_item in zip(sorted(t1, key=lambda item: item['trial_id']),
- sorted(tM, key=lambda item: item['trial_id'])):
- assert_dict_equal(t1_item, tM_item,
- 'Dictionary elements do not agree in compound statement')
-
- def test_record_insert(self):
- "Test whether record insert works"
- tmp = np.array([(2, 'Klara', 'monkey')],
- dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
-
- self.subjects.insert(tmp[0])
- testt2 = (self.subjects & 'subject_id = 2').fetch()[0]
- assert_equal(tuple(tmp[0]), tuple(testt2), "Inserted and fetched record do not match!")
-
- def test_delete(self):
- "Test whether delete works"
- tmp = np.array([(2, 'Klara', 'monkey'), (1,'Peter', 'mouse')],
- dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
-
- self.subjects.batch_insert(tmp)
- assert_true(len(self.subjects) == 2, 'Length does not match 2.')
- self.subjects.delete()
- assert_true(len(self.subjects) == 0, 'Length does not match 0.')
-
- # def test_cascading_delete(self):
- # "Test whether delete works"
- # tmp = np.array([(2, 'Klara', 'monkey'), (1,'Peter', 'mouse')],
- # dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
- #
- # self.subjects.batch_insert(tmp)
- #
- # self.trials.insert(dict(subject_id=1, trial_id=1, outcome=0))
- # self.trials.insert(dict(subject_id=1, trial_id=2, outcome=1))
- # self.trials.insert(dict(subject_id=2, trial_id=3, outcome=2))
- # assert_true(len(self.subjects) == 2, 'Length does not match 2.')
- # assert_true(len(self.trials) == 3, 'Length does not match 3.')
- # (self.subjects & 'subject_id=1').delete()
- # assert_true(len(self.subjects) == 1, 'Length does not match 1.')
- # assert_true(len(self.trials) == 1, 'Length does not match 1.')
-
- def test_short_hand_foreign_reference(self):
- self.animals.heading
-
-
-
- def test_record_insert_different_order(self):
- "Test whether record insert works"
- tmp = np.array([('Klara', 2, 'monkey')],
- dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
-
- self.subjects.insert(tmp[0])
- testt2 = (self.subjects & 'subject_id = 2').fetch()[0]
- assert_equal((2, 'Klara', 'monkey'), tuple(testt2),
- "Inserted and fetched record do not match!")
-
- @raises(TransactionError)
- def test_transaction_error(self):
- "Test whether declaration in transaction is prohibited"
-
- tmp = np.array([('Klara', 2, 'monkey')],
- dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
- self.conn.start_transaction()
- self.subjects.insert(tmp[0])
-
- # def test_transaction_suppress_error(self):
- # "Test whether ignore_errors ignores the errors."
- #
- # tmp = np.array([('Klara', 2, 'monkey')],
- # dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
- # with self.conn.transaction(ignore_errors=True) as tr:
- # self.subjects.insert(tmp[0])
-
-
- @raises(TransactionError)
- def test_transaction_error_not_resolve(self):
- "Test whether declaration in transaction is prohibited"
-
- tmp = np.array([('Klara', 2, 'monkey'), ('Klara', 3, 'monkey')],
- dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
- try:
- self.conn.start_transaction()
- self.subjects.insert(tmp[0])
- except TransactionError as te:
- self.conn.cancel_transaction()
-
- self.conn.start_transaction()
- self.subjects.insert(tmp[0])
-
- def test_transaction_error_resolve(self):
- "Test whether declaration in transaction is prohibited"
-
- tmp = np.array([('Klara', 2, 'monkey'), ('Klara', 3, 'monkey')],
- dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
- try:
- self.conn.start_transaction()
- self.subjects.insert(tmp[0])
- except TransactionError as te:
- self.conn.cancel_transaction()
- te.resolve()
-
- self.conn.start_transaction()
- self.subjects.insert(tmp[0])
- self.conn.commit_transaction()
-
- def test_transaction_error2(self):
- "If table is declared, we are allowed to insert within a transaction"
-
- tmp = np.array([('Klara', 2, 'monkey'), ('Klara', 3, 'monkey')],
- dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
- self.subjects.insert(tmp[0])
-
- self.conn.start_transaction()
- self.subjects.insert(tmp[1])
- self.conn.commit_transaction()
-
-
- @raises(KeyError)
- def test_wrong_key_insert_records(self):
- "Test whether record insert works"
- tmp = np.array([('Klara', 2, 'monkey')],
- dtype=[('real_deal', 'O'), ('subject_id', '>i4'), ('species', 'O')])
-
- self.subjects.insert(tmp[0])
-
-
- def test_dict_insert(self):
- "Test whether record insert works"
- tmp = {'real_id': 'Brunhilda',
- 'subject_id': 3,
- 'species': 'human'}
-
- self.subjects.insert(tmp)
- testt2 = (self.subjects & 'subject_id = 3').fetch()[0]
- assert_equal((3, 'Brunhilda', 'human'), tuple(testt2), "Inserted and fetched record do not match!")
-
- @raises(KeyError)
- def test_wrong_key_insert(self):
- "Test whether a correct error is generated when inserting wrong attribute name"
- tmp = {'real_deal': 'Brunhilda',
- 'subject_database': 3,
- 'species': 'human'}
-
- self.subjects.insert(tmp)
-
- def test_batch_insert(self):
- "Test whether record insert works"
- tmp = np.array([('Klara', 2, 'monkey'), ('Brunhilda', 3, 'mouse'), ('Mickey', 1, 'human')],
- dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
-
- self.subjects.batch_insert(tmp)
-
- expected = np.array([(1, 'Mickey', 'human'), (2, 'Klara', 'monkey'),
- (3, 'Brunhilda', 'mouse')],
- dtype=[('subject_id', 'i4'), ('species', 'O')])
-
- self.subjects.iter_insert(tmp.__iter__())
-
- expected = np.array([(1, 'Mickey', 'human'), (2, 'Klara', 'monkey'),
- (3, 'Brunhilda', 'mouse')],
- dtype=[('subject_id', ' `dj_free`.Animals
- rec_session_id : int # recording session identifier
- """
- table = FreeRelation(self.conn, 'dj_free', 'Recordings', definition)
- assert_raises(DataJointError, table.declare)
-
- def test_reference_to_existing_table(self):
- definition1 = """
- `dj_free`.Animals (manual) # my animal table
- animal_id : int # unique id for the animal
- ---
- animal_name : varchar(128) # name of the animal
- """
- table1 = FreeRelation(self.conn, 'dj_free', 'Animals', definition1)
- table1.declare()
-
- definition2 = """
- `dj_free`.Recordings (manual) # recordings
- -> `dj_free`.Animals
- rec_session_id : int # recording session identifier
- """
- table2 = FreeRelation(self.conn, 'dj_free', 'Recordings', definition2)
- table2.declare()
- assert_true('animal_id' in table2.primary_key)
-
-
-def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
- return ''.join(random.choice(chars) for _ in range(size))
-
-class TestIterator(object):
- def __init__(self):
- self.relvar = None
- self.setup()
-
- """
- Test cases for Iterators in Relations objects
- """
-
- def setup(self):
- """
- Create a connection object and prepare test modules
- as follows:
- test1 - has conn and bounded
- """
- cleanup() # drop all databases with PREFIX
- test4.__dict__.pop('conn', None) # make sure conn is not defined at schema level
-
- self.conn = Connection(**CONN_INFO)
- test4.conn = self.conn
- self.conn.bind(test4.__name__, PREFIX + '_test4')
- self.relvar_blob = test4.Matrix()
-
- def teardown(self):
- cleanup()
-
-
- def test_blob_iteration(self):
- "Tests the basic call of the iterator"
-
- dicts = []
- for i in range(10):
-
- c = id_generator()
-
- t = {'matrix_id':i,
- 'data': np.random.randn(4,4,4),
- 'comment': c}
- self.relvar_blob.insert(t)
- dicts.append(t)
-
- for t, t2 in zip(dicts, self.relvar_blob):
- assert_true(isinstance(t2, dict), 'iterator does not return dict')
-
- assert_equal(t['matrix_id'], t2['matrix_id'], 'inserted and retrieved tuples do not match')
- assert_equal(t['comment'], t2['comment'], 'inserted and retrieved tuples do not match')
- assert_true(np.all(t['data'] == t2['data']), 'inserted and retrieved tuples do not match')
-
- def test_fetch(self):
- dicts = []
- for i in range(10):
-
- c = id_generator()
-
- t = {'matrix_id':i,
- 'data': np.random.randn(4,4,4),
- 'comment': c}
- self.relvar_blob.insert(t)
- dicts.append(t)
-
- tuples2 = self.relvar_blob.fetch()
- assert_true(isinstance(tuples2, np.ndarray), "Return value of fetch does not have proper type.")
- assert_true(isinstance(tuples2[0], np.void), "Return value of fetch does not have proper type.")
- for t, t2 in zip(dicts, tuples2):
-
- assert_equal(t['matrix_id'], t2['matrix_id'], 'inserted and retrieved tuples do not match')
- assert_equal(t['comment'], t2['comment'], 'inserted and retrieved tuples do not match')
- assert_true(np.all(t['data'] == t2['data']), 'inserted and retrieved tuples do not match')
-
- def test_fetch_dicts(self):
- dicts = []
- for i in range(10):
-
- c = id_generator()
-
- t = {'matrix_id':i,
- 'data': np.random.randn(4,4,4),
- 'comment': c}
- self.relvar_blob.insert(t)
- dicts.append(t)
-
- tuples2 = self.relvar_blob.fetch(as_dict=True)
- assert_true(isinstance(tuples2, list), "Return value of fetch with as_dict=True does not have proper type.")
- assert_true(isinstance(tuples2[0], dict), "Return value of fetch with as_dict=True does not have proper type.")
- for t, t2 in zip(dicts, tuples2):
- assert_equal(t['matrix_id'], t2['matrix_id'], 'inserted and retrieved dicts do not match')
- assert_equal(t['comment'], t2['comment'], 'inserted and retrieved dicts do not match')
- assert_true(np.all(t['data'] == t2['data']), 'inserted and retrieved dicts do not match')
-
-
-
-class TestAutopopulate(object):
- def __init__(self):
- self.relvar = None
- self.setup()
-
- """
- Test cases for Iterators in Relations objects
- """
-
- def setup(self):
- """
- Create a connection object and prepare test modules
- as follows:
- test1 - has conn and bounded
- """
- cleanup() # drop all databases with PREFIX
- test1.__dict__.pop('conn', None) # make sure conn is not defined at schema level
-
- self.conn = Connection(**CONN_INFO)
- test1.conn = self.conn
- self.conn.bind(test1.__name__, PREFIX + '_test1')
-
- self.subjects = test1.Subjects()
- self.trials = test1.Trials()
- self.squared = test1.SquaredScore()
- self.dummy = test1.SquaredSubtable()
- self.dummy1 = test1.WrongImplementation()
- self.error_generator = test1.ErrorGenerator()
- self.fill_relation()
-
-
-
- def fill_relation(self):
- tmp = np.array([('Klara', 2, 'monkey'), ('Peter', 3, 'mouse')],
- dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
- self.subjects.batch_insert(tmp)
-
- for trial_id in range(1,11):
- self.trials.insert(dict(subject_id=2, trial_id=trial_id, outcome=np.random.randint(0,10)))
-
- def teardown(self):
- cleanup()
-
- def test_autopopulate(self):
- self.squared.populate()
- assert_equal(len(self.squared), 10)
-
- for trial in self.trials*self.squared:
- assert_equal(trial['outcome']**2, trial['squared'])
-
- def test_autopopulate_restriction(self):
- self.squared.populate(restriction='trial_id <= 5')
- assert_equal(len(self.squared), 5)
-
- for trial in self.trials*self.squared:
- assert_equal(trial['outcome']**2, trial['squared'])
-
-
- # def test_autopopulate_transaction_error(self):
- # errors = self.squared.populate(suppress_errors=True)
- # assert_equal(len(errors), 1)
- # assert_true(isinstance(errors[0][1], TransactionError))
-
- @raises(DataJointError)
- def test_autopopulate_relation_check(self):
-
- class dummy(AutoPopulate):
-
- def populate_relation(self):
- return None
-
- def _make_tuples(self, key):
- pass
-
- du = dummy()
- du.populate() \
-
- @raises(DataJointError)
- def test_autopopulate_relation_check(self):
- self.dummy1.populate()
-
- @raises(Exception)
- def test_autopopulate_relation_check(self):
- self.error_generator.populate()\
-
- @raises(Exception)
- def test_autopopulate_relation_check2(self):
- tmp = self.dummy2.populate(suppress_errors=True)
- assert_equal(len(tmp), 1, 'Error list should have length 1.')
+# import random
+# import string
+#
+# __author__ = 'fabee'
+#
+# from .schemata.schema1 import test1, test4
+from .schemata.test1 import Subjects
+
+
+def test_instantiate_relation():
+ s = Subjects()
+
+#
+# from . import BASE_CONN, CONN_INFO, PREFIX, cleanup
+# from datajoint.connection import Connection
+# from nose.tools import assert_raises, assert_equal, assert_regexp_matches, assert_false, assert_true, assert_list_equal,\
+# assert_tuple_equal, assert_dict_equal, raises
+# from datajoint import DataJointError, TransactionError, AutoPopulate, Relation
+# import numpy as np
+# from numpy.testing import assert_array_equal
+# from datajoint.relation import FreeRelation
+# import numpy as np
+#
+#
+# def trial_faker(n=10):
+# def iter():
+# for s in [1, 2]:
+# for i in range(n):
+# yield dict(trial_id=i, subject_id=s, outcome=int(np.random.randint(10)), notes= 'no comment')
+# return iter()
+#
+#
+# def setup():
+# """
+# Setup connections and bindings
+# """
+# pass
+#
+#
+# class TestTableObject(object):
+# def __init__(self):
+# self.subjects = None
+# self.setup()
+#
+# """
+# Test cases for FreeRelation objects
+# """
+#
+# def setup(self):
+# """
+# Create a connection object and prepare test modules
+# as follows:
+# test1 - has conn and bounded
+# """
+# cleanup() # drop all databases with PREFIX
+# test1.__dict__.pop('conn', None)
+# test4.__dict__.pop('conn', None) # make sure conn is not defined at schema level
+#
+# self.conn = Connection(**CONN_INFO)
+# test1.conn = self.conn
+# test4.conn = self.conn
+# self.conn.bind(test1.__name__, PREFIX + '_test1')
+# self.conn.bind(test4.__name__, PREFIX + '_test4')
+# self.subjects = test1.Subjects()
+# self.animals = test1.Animals()
+# self.relvar_blob = test4.Matrix()
+# self.trials = test1.Trials()
+#
+# def teardown(self):
+# cleanup()
+#
+# def test_compound_restriction(self):
+# s = self.subjects
+# t = self.trials
+#
+# s.insert(dict(subject_id=1, real_id='M'))
+# s.insert(dict(subject_id=2, real_id='F'))
+# t.iter_insert(trial_faker(20))
+#
+# tM = t & (s & "real_id = 'M'")
+# t1 = t & "subject_id = 1"
+#
+# assert_equal(len(tM), len(t1), "Results of compound request does not have same length")
+#
+# for t1_item, tM_item in zip(sorted(t1, key=lambda item: item['trial_id']),
+# sorted(tM, key=lambda item: item['trial_id'])):
+# assert_dict_equal(t1_item, tM_item,
+# 'Dictionary elements do not agree in compound statement')
+#
+# def test_record_insert(self):
+# "Test whether record insert works"
+# tmp = np.array([(2, 'Klara', 'monkey')],
+# dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
+#
+# self.subjects.insert(tmp[0])
+# testt2 = (self.subjects & 'subject_id = 2').fetch()[0]
+# assert_equal(tuple(tmp[0]), tuple(testt2), "Inserted and fetched record do not match!")
+#
+# def test_delete(self):
+# "Test whether delete works"
+# tmp = np.array([(2, 'Klara', 'monkey'), (1,'Peter', 'mouse')],
+# dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
+#
+# self.subjects.batch_insert(tmp)
+# assert_true(len(self.subjects) == 2, 'Length does not match 2.')
+# self.subjects.delete()
+# assert_true(len(self.subjects) == 0, 'Length does not match 0.')
+#
+# # def test_cascading_delete(self):
+# # "Test whether delete works"
+# # tmp = np.array([(2, 'Klara', 'monkey'), (1,'Peter', 'mouse')],
+# # dtype=[('subject_id', '>i4'), ('real_id', 'O'), ('species', 'O')])
+# #
+# # self.subjects.batch_insert(tmp)
+# #
+# # self.trials.insert(dict(subject_id=1, trial_id=1, outcome=0))
+# # self.trials.insert(dict(subject_id=1, trial_id=2, outcome=1))
+# # self.trials.insert(dict(subject_id=2, trial_id=3, outcome=2))
+# # assert_true(len(self.subjects) == 2, 'Length does not match 2.')
+# # assert_true(len(self.trials) == 3, 'Length does not match 3.')
+# # (self.subjects & 'subject_id=1').delete()
+# # assert_true(len(self.subjects) == 1, 'Length does not match 1.')
+# # assert_true(len(self.trials) == 1, 'Length does not match 1.')
+#
+# def test_short_hand_foreign_reference(self):
+# self.animals.heading
+#
+#
+#
+# def test_record_insert_different_order(self):
+# "Test whether record insert works"
+# tmp = np.array([('Klara', 2, 'monkey')],
+# dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
+#
+# self.subjects.insert(tmp[0])
+# testt2 = (self.subjects & 'subject_id = 2').fetch()[0]
+# assert_equal((2, 'Klara', 'monkey'), tuple(testt2),
+# "Inserted and fetched record do not match!")
+#
+# @raises(TransactionError)
+# def test_transaction_error(self):
+# "Test whether declaration in transaction is prohibited"
+#
+# tmp = np.array([('Klara', 2, 'monkey')],
+# dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
+# self.conn.start_transaction()
+# self.subjects.insert(tmp[0])
+#
+# # def test_transaction_suppress_error(self):
+# # "Test whether ignore_errors ignores the errors."
+# #
+# # tmp = np.array([('Klara', 2, 'monkey')],
+# # dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
+# # with self.conn.transaction(ignore_errors=True) as tr:
+# # self.subjects.insert(tmp[0])
+#
+#
+# @raises(TransactionError)
+# def test_transaction_error_not_resolve(self):
+# "Test whether declaration in transaction is prohibited"
+#
+# tmp = np.array([('Klara', 2, 'monkey'), ('Klara', 3, 'monkey')],
+# dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
+# try:
+# self.conn.start_transaction()
+# self.subjects.insert(tmp[0])
+# except TransactionError as te:
+# self.conn.cancel_transaction()
+#
+# self.conn.start_transaction()
+# self.subjects.insert(tmp[0])
+#
+# def test_transaction_error_resolve(self):
+# "Test whether declaration in transaction is prohibited"
+#
+# tmp = np.array([('Klara', 2, 'monkey'), ('Klara', 3, 'monkey')],
+# dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
+# try:
+# self.conn.start_transaction()
+# self.subjects.insert(tmp[0])
+# except TransactionError as te:
+# self.conn.cancel_transaction()
+# te.resolve()
+#
+# self.conn.start_transaction()
+# self.subjects.insert(tmp[0])
+# self.conn.commit_transaction()
+#
+# def test_transaction_error2(self):
+# "If table is declared, we are allowed to insert within a transaction"
+#
+# tmp = np.array([('Klara', 2, 'monkey'), ('Klara', 3, 'monkey')],
+# dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
+# self.subjects.insert(tmp[0])
+#
+# self.conn.start_transaction()
+# self.subjects.insert(tmp[1])
+# self.conn.commit_transaction()
+#
+#
+# @raises(KeyError)
+# def test_wrong_key_insert_records(self):
+# "Test whether record insert works"
+# tmp = np.array([('Klara', 2, 'monkey')],
+# dtype=[('real_deal', 'O'), ('subject_id', '>i4'), ('species', 'O')])
+#
+# self.subjects.insert(tmp[0])
+#
+#
+# def test_dict_insert(self):
+# "Test whether record insert works"
+# tmp = {'real_id': 'Brunhilda',
+# 'subject_id': 3,
+# 'species': 'human'}
+#
+# self.subjects.insert(tmp)
+# testt2 = (self.subjects & 'subject_id = 3').fetch()[0]
+# assert_equal((3, 'Brunhilda', 'human'), tuple(testt2), "Inserted and fetched record do not match!")
+#
+# @raises(KeyError)
+# def test_wrong_key_insert(self):
+# "Test whether a correct error is generated when inserting wrong attribute name"
+# tmp = {'real_deal': 'Brunhilda',
+# 'subject_database': 3,
+# 'species': 'human'}
+#
+# self.subjects.insert(tmp)
+#
+# def test_batch_insert(self):
+# "Test whether record insert works"
+# tmp = np.array([('Klara', 2, 'monkey'), ('Brunhilda', 3, 'mouse'), ('Mickey', 1, 'human')],
+# dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
+#
+# self.subjects.batch_insert(tmp)
+#
+# expected = np.array([(1, 'Mickey', 'human'), (2, 'Klara', 'monkey'),
+# (3, 'Brunhilda', 'mouse')],
+# dtype=[('subject_id', 'i4'), ('species', 'O')])
+#
+# self.subjects.iter_insert(tmp.__iter__())
+#
+# expected = np.array([(1, 'Mickey', 'human'), (2, 'Klara', 'monkey'),
+# (3, 'Brunhilda', 'mouse')],
+# dtype=[('subject_id', ' `dj_free`.Animals
+# rec_session_id : int # recording session identifier
+# """
+# table = FreeRelation(self.conn, 'dj_free', 'Recordings', definition)
+# assert_raises(DataJointError, table.declare)
+#
+# def test_reference_to_existing_table(self):
+# definition1 = """
+# `dj_free`.Animals (manual) # my animal table
+# animal_id : int # unique id for the animal
+# ---
+# animal_name : varchar(128) # name of the animal
+# """
+# table1 = FreeRelation(self.conn, 'dj_free', 'Animals', definition1)
+# table1.declare()
+#
+# definition2 = """
+# `dj_free`.Recordings (manual) # recordings
+# -> `dj_free`.Animals
+# rec_session_id : int # recording session identifier
+# """
+# table2 = FreeRelation(self.conn, 'dj_free', 'Recordings', definition2)
+# table2.declare()
+# assert_true('animal_id' in table2.primary_key)
+#
+#
+# def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
+# return ''.join(random.choice(chars) for _ in range(size))
+#
+# class TestIterator(object):
+# def __init__(self):
+# self.relvar = None
+# self.setup()
+#
+# """
+# Test cases for Iterators in Relations objects
+# """
+#
+# def setup(self):
+# """
+# Create a connection object and prepare test modules
+# as follows:
+# test1 - has conn and bounded
+# """
+# cleanup() # drop all databases with PREFIX
+# test4.__dict__.pop('conn', None) # make sure conn is not defined at schema level
+#
+# self.conn = Connection(**CONN_INFO)
+# test4.conn = self.conn
+# self.conn.bind(test4.__name__, PREFIX + '_test4')
+# self.relvar_blob = test4.Matrix()
+#
+# def teardown(self):
+# cleanup()
+#
+#
+# def test_blob_iteration(self):
+# "Tests the basic call of the iterator"
+#
+# dicts = []
+# for i in range(10):
+#
+# c = id_generator()
+#
+# t = {'matrix_id':i,
+# 'data': np.random.randn(4,4,4),
+# 'comment': c}
+# self.relvar_blob.insert(t)
+# dicts.append(t)
+#
+# for t, t2 in zip(dicts, self.relvar_blob):
+# assert_true(isinstance(t2, dict), 'iterator does not return dict')
+#
+# assert_equal(t['matrix_id'], t2['matrix_id'], 'inserted and retrieved tuples do not match')
+# assert_equal(t['comment'], t2['comment'], 'inserted and retrieved tuples do not match')
+# assert_true(np.all(t['data'] == t2['data']), 'inserted and retrieved tuples do not match')
+#
+# def test_fetch(self):
+# dicts = []
+# for i in range(10):
+#
+# c = id_generator()
+#
+# t = {'matrix_id':i,
+# 'data': np.random.randn(4,4,4),
+# 'comment': c}
+# self.relvar_blob.insert(t)
+# dicts.append(t)
+#
+# tuples2 = self.relvar_blob.fetch()
+# assert_true(isinstance(tuples2, np.ndarray), "Return value of fetch does not have proper type.")
+# assert_true(isinstance(tuples2[0], np.void), "Return value of fetch does not have proper type.")
+# for t, t2 in zip(dicts, tuples2):
+#
+# assert_equal(t['matrix_id'], t2['matrix_id'], 'inserted and retrieved tuples do not match')
+# assert_equal(t['comment'], t2['comment'], 'inserted and retrieved tuples do not match')
+# assert_true(np.all(t['data'] == t2['data']), 'inserted and retrieved tuples do not match')
+#
+# def test_fetch_dicts(self):
+# dicts = []
+# for i in range(10):
+#
+# c = id_generator()
+#
+# t = {'matrix_id':i,
+# 'data': np.random.randn(4,4,4),
+# 'comment': c}
+# self.relvar_blob.insert(t)
+# dicts.append(t)
+#
+# tuples2 = self.relvar_blob.fetch(as_dict=True)
+# assert_true(isinstance(tuples2, list), "Return value of fetch with as_dict=True does not have proper type.")
+# assert_true(isinstance(tuples2[0], dict), "Return value of fetch with as_dict=True does not have proper type.")
+# for t, t2 in zip(dicts, tuples2):
+# assert_equal(t['matrix_id'], t2['matrix_id'], 'inserted and retrieved dicts do not match')
+# assert_equal(t['comment'], t2['comment'], 'inserted and retrieved dicts do not match')
+# assert_true(np.all(t['data'] == t2['data']), 'inserted and retrieved dicts do not match')
+#
+#
+#
+# class TestAutopopulate(object):
+# def __init__(self):
+# self.relvar = None
+# self.setup()
+#
+# """
+# Test cases for Iterators in Relations objects
+# """
+#
+# def setup(self):
+# """
+# Create a connection object and prepare test modules
+# as follows:
+# test1 - has conn and bounded
+# """
+# cleanup() # drop all databases with PREFIX
+# test1.__dict__.pop('conn', None) # make sure conn is not defined at schema level
+#
+# self.conn = Connection(**CONN_INFO)
+# test1.conn = self.conn
+# self.conn.bind(test1.__name__, PREFIX + '_test1')
+#
+# self.subjects = test1.Subjects()
+# self.trials = test1.Trials()
+# self.squared = test1.SquaredScore()
+# self.dummy = test1.SquaredSubtable()
+# self.dummy1 = test1.WrongImplementation()
+# self.error_generator = test1.ErrorGenerator()
+# self.fill_relation()
+#
+#
+#
+# def fill_relation(self):
+# tmp = np.array([('Klara', 2, 'monkey'), ('Peter', 3, 'mouse')],
+# dtype=[('real_id', 'O'), ('subject_id', '>i4'), ('species', 'O')])
+# self.subjects.batch_insert(tmp)
+#
+# for trial_id in range(1,11):
+# self.trials.insert(dict(subject_id=2, trial_id=trial_id, outcome=np.random.randint(0,10)))
+#
+# def teardown(self):
+# cleanup()
+#
+# def test_autopopulate(self):
+# self.squared.populate()
+# assert_equal(len(self.squared), 10)
+#
+# for trial in self.trials*self.squared:
+# assert_equal(trial['outcome']**2, trial['squared'])
+#
+# def test_autopopulate_restriction(self):
+# self.squared.populate(restriction='trial_id <= 5')
+# assert_equal(len(self.squared), 5)
+#
+# for trial in self.trials*self.squared:
+# assert_equal(trial['outcome']**2, trial['squared'])
+#
+#
+# # def test_autopopulate_transaction_error(self):
+# # errors = self.squared.populate(suppress_errors=True)
+# # assert_equal(len(errors), 1)
+# # assert_true(isinstance(errors[0][1], TransactionError))
+#
+# @raises(DataJointError)
+# def test_autopopulate_relation_check(self):
+#
+# class dummy(AutoPopulate):
+#
+# def populate_relation(self):
+# return None
+#
+# def _make_tuples(self, key):
+# pass
+#
+# du = dummy()
+# du.populate() \
+#
+# @raises(DataJointError)
+# def test_autopopulate_relation_check(self):
+# self.dummy1.populate()
+#
+# @raises(Exception)
+# def test_autopopulate_relation_check(self):
+# self.error_generator.populate()\
+#
+# @raises(Exception)
+# def test_autopopulate_relation_check2(self):
+# tmp = self.dummy2.populate(suppress_errors=True)
+# assert_equal(len(tmp), 1, 'Error list should have length 1.')
diff --git a/tests/test_relational_operand.py b/tests/test_relational_operand.py
index 3a25a87d0..3ceeb4964 100644
--- a/tests/test_relational_operand.py
+++ b/tests/test_relational_operand.py
@@ -1,47 +1,47 @@
-"""
-Collection of test cases to test relational methods
-"""
-
-__author__ = 'eywalker'
-
-
-def setup():
- """
- Setup
- :return:
- """
-
-class TestRelationalAlgebra(object):
-
- def setup(self):
- pass
-
- def test_mul(self):
- pass
-
- def test_project(self):
- pass
-
- def test_iand(self):
- pass
-
- def test_isub(self):
- pass
-
- def test_sub(self):
- pass
-
- def test_len(self):
- pass
-
- def test_fetch(self):
- pass
-
- def test_repr(self):
- pass
-
- def test_iter(self):
- pass
-
- def test_not(self):
- pass
\ No newline at end of file
+# """
+# Collection of test cases to test relational methods
+# """
+#
+# __author__ = 'eywalker'
+#
+#
+# def setup():
+# """
+# Setup
+# :return:
+# """
+#
+# class TestRelationalAlgebra(object):
+#
+# def setup(self):
+# pass
+#
+# def test_mul(self):
+# pass
+#
+# def test_project(self):
+# pass
+#
+# def test_iand(self):
+# pass
+#
+# def test_isub(self):
+# pass
+#
+# def test_sub(self):
+# pass
+#
+# def test_len(self):
+# pass
+#
+# def test_fetch(self):
+# pass
+#
+# def test_repr(self):
+# pass
+#
+# def test_iter(self):
+# pass
+#
+# def test_not(self):
+# pass
\ No newline at end of file