Commit
Merge pull request #2 from zopefoundation/zope4-brains_and_extension_class

Rationalize brains vs. extension class conflicts.
dataflake committed May 14, 2017
2 parents 7d22a30 + 594c633 commit d8e206a
Showing 5 changed files with 256 additions and 211 deletions.
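The heart of the change: result records are produced by a class that mixes a caller-supplied "brains" class into ExtensionClass-based bases (Record and Acquisition's Implicit), so the NoBrains placeholders in DA.py and RDB.py below now derive from ExtensionClass.Base instead of being classic classes. A minimal sketch of that mixing pattern, with a hypothetical MyBrain mixin that is not part of this commit:

from Acquisition import Implicit
from ExtensionClass import Base
from Record import Record


class MyBrain(Base):
    """Hypothetical brains mixin adding computed attributes to records."""

    def initial(self):
        # 'name' is assumed to be a column of the result set.
        return self.name[:1]


class NoBrains(Base):
    """Default brain, mirroring the new definitions in DA.py and RDB.py."""


# RDB.DatabaseResults builds its record class along these lines (see the
# RDB.py hunk further down):
class r(Record, Implicit, MyBrain, NoBrains):
    'Result record class'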
3 changes: 2 additions & 1 deletion setup.py
@@ -37,13 +37,14 @@
install_requires=[
'setuptools',
'Zope2 >= 4.0dev',
'RestrictedPython >= 4.0dev',
'AccessControl >= 4.0dev',
'Persistence >= 3.0dev',
'Acquisition',
'DateTime',
'DocumentTemplate',
'ExtensionClass >= 4.1a1',
'Missing',
'Persistence',
'Record',
'transaction',
'zope.interface',
6 changes: 3 additions & 3 deletions src/Shared/DC/ZRDB/DA.py
@@ -236,7 +236,7 @@ def getObject(module, name, reload=0,
"in module, '%s'." % (name, module))


class NoBrains:
class NoBrains(Base):
pass


@@ -720,9 +720,9 @@ def __call__(self, REQUEST=None, __ick__=None, src__=0, test__=0, **kw):
f=StringIO()
f.write(result)
f.seek(0)
result = File(f,brain,p, None)
result = File(f, brain, p)
else:
result = Results(result, brain, p, None)
result = Results(result, brain, p)
columns = result._searchable_result_columns()
if test__ and columns != self._col:
self._col=columns
257 changes: 142 additions & 115 deletions src/Shared/DC/ZRDB/RDB.py
@@ -12,96 +12,116 @@
##############################################################################
'''Class for reading RDB files'''

from string import split, strip, lower, upper, atof, atoi, atol, find, join
import DateTime,re
import array
import re
import string

from Acquisition import Implicit
from DateTime import DateTime
from ExtensionClass import Base
from Missing import MV
from array import array
from Record import Record
from Acquisition import Implicit
import ExtensionClass

def parse_text(s):
if find(s,'\\') < 0 and (find(s,'\\t') < 0 and find(s,'\\n') < 0): return s
r=[]
for x in split(s,'\\\\'):
x=join(split(x,'\\n'),'\n')
r.append(join(split(x,'\\t'),'\t'))
return join(r,'\\')
if s.find('\\') < 0 and s.find('\\t') < 0 and s.find('\\n') < 0:
return s
r = []
for x in s.split('\\\\'):
x = '\n'.join(x.split('\\n'))
r.append('\t'.join(x.split('\\t')))
return '\\'.join(r)
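
# Illustrative sketch, not part of this commit: parse_text decodes the
# backslash escapes RDB files use for text fields -- a backslash-t becomes a
# real tab, backslash-n a newline, and a doubled backslash a single one.
# Shown as Python string literals:
#
#   parse_text('plain')        ->  'plain'
#   parse_text('a\\tb\\nc')    ->  'a\tb\nc'
#   parse_text('C:\\\\temp')   ->  'C:\\temp'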


Parsers = {
'n': string.atof,
'i': string.atoi,
'l': string.atol,
'd': DateTime,
't': parse_text,
}

Parsers={'n': atof,
'i': atoi,
'l': atol,
'd': DateTime.DateTime,
't': parse_text,
}
class SQLAlias(Base):

class SQLAlias(ExtensionClass.Base):
def __init__(self, name): self._n=name
def __of__(self, parent): return getattr(parent, self._n)
def __init__(self, name):
self._n = name

class NoBrains: pass
def __of__(self, parent):
return getattr(parent, self._n)

class DatabaseResults:
class NoBrains(Base):
pass

class DatabaseResults(object):
"""Class for reading RDB files
"""
_index=None
_index = None

# We need to allow access to not-explicitly-protected
# individual record objects contained in the result.
__allow_access_to_unprotected_subobjects__=1

def __init__(self,file,brains=NoBrains, parent=None, zbrains=None):
__allow_access_to_unprotected_subobjects__ = 1

self._file=file
readline=file.readline
line=readline()
self._parent=parent
if zbrains is None: zbrains=NoBrains
def __init__(self, file, brains=NoBrains, parent=None, zbrains=NoBrains):

self._file = file
readline = file.readline
line = readline()
self._parent = parent

while line and line.find('#') != -1 : line=readline()
while line and line.find('#') != -1 :
line = readline()

line=line[:-1]
if line and line[-1:] in '\r\n': line=line[:-1]
self._names=names=split(line,'\t')
if not names: raise ValueError, 'No column names'
line = line[:-1]
if line and line[-1:] in '\r\n':
line = line[:-1]
self._names = names = [name.strip() for name in line.split('\t')]
if not names:
raise ValueError, 'No column names'

aliases=[]
self._schema=schema={}
i=0
aliases = []
self._schema = schema = {}
i = 0
for name in names:
name=strip(name)
if not name:
raise ValueError, 'Empty column name, %s' % name

if schema.has_key(name):
raise ValueError, 'Duplicate column name, %s' % name
schema[name]=i
n=lower(name)
if n != name: aliases.append((n, SQLAlias(name)))
n=upper(name)
if n != name: aliases.append((n, SQLAlias(name)))
i=i+1

self._nv=nv=len(names)
line=readline()
line=line[:-1]
if line[-1:] in '\r\n': line=line[:-1]

self._defs=defs=split(line,'\t')
if not defs: raise ValueError, 'No column definitions'

schema[name] = i
i = i + 1

n = name.lower()
if n != name:
aliases.append((n, SQLAlias(name)))

n = name.upper()
if n != name:
aliases.append((n, SQLAlias(name)))
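
# Illustrative sketch, not part of this commit: the loop above records a
# SQLAlias for each case variant of a column name that differs from the
# original, e.g. for a column 'Title'
#
#   schema  -> {'Title': 0}
#   aliases -> [('title', SQLAlias('Title')), ('TITLE', SQLAlias('Title'))]
#
# The aliases are later set as class attributes on the record class; because
# SQLAlias is an ExtensionClass.Base with __of__, row.TITLE should resolve to
# row.Title on a returned record.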

self._nv = nv = len(names)
line = readline()
line = line[:-1]
if line[-1:] in '\r\n':
line = line[:-1]

self._defs = defs = [_def.strip() for _def in line.split('\t')]

if not defs:
raise ValueError, 'No column definitions'

if len(defs) != nv:
raise ValueError, (
"""The number of column names and the number of column
definitions are different.""")

i=0
self._parsers=parsers=[]
defre=re.compile(r'([0-9]*)([a-zA-Z])?')
self._data_dictionary=dd={}
self.__items__=items=[]
i = 0
self._parsers = parsers = []
defre = re.compile(r'([0-9]*)([a-zA-Z])?')
self._data_dictionary = dd = {}
self.__items__ = items = []

for _def in defs:
_def=strip(_def)

if not _def:
raise ValueError, ('Empty column definition for %s' % names[i])

@@ -110,99 +130,106 @@ def __init__(self,file,brains=NoBrains, parent=None, zbrains=None):
raise ValueError, (
'Invalid column definition for, %s, for %s'
% _def, names[i])

type = mo.group(2).lower()
width = mo.group(1)
if width: width=atoi(width)
else: width=8

try: parser=Parsers[type]
except: parser=str
if width:
width = string.atoi(width)
else:
width = 8

name=names[i]
d={'name': name, 'type': type, 'width': width, 'parser': parser}
parser = Parsers.get(type, str)

name = names[i]
d = {'name': name, 'type': type, 'width': width, 'parser': parser}
items.append(d)
dd[name]=d
dd[name] = d

parsers.append((i,parser))
i=i+1
i += 1
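
# Illustrative sketch, not part of this commit: a column definition such as
# '10i' is split by defre into a width and a type letter, and the type letter
# picks the parser (falling back to str for unknown letters):
#
#   mo = defre.match('10i')
#   mo.group(1), mo.group(2)                       ->  ('10', 'i')
#   Parsers.get(mo.group(2).lower(), str)('42')    ->  42    (string.atoi)
#   Parsers.get('x', str)('42')                    ->  '42'  (kept as text)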

# Create a record class to hold the records.
names=tuple(names)
names = tuple(names)

class r(Record, Implicit, brains, zbrains):
'Result record class'

r.__record_schema__=schema
for k in filter(lambda k: k[:2]=='__', Record.__dict__.keys()):
setattr(r,k,getattr(Record,k))
for k, v in Record.__dict__.items():
if k[:2] == '__':
setattr(r, k, v)

r.__record_schema__ = schema

# Add SQL Aliases
for k, v in aliases:
if not hasattr(r,k):
if getattr(r, k, self) is self:
setattr(r, k, v)

if hasattr(brains, '__init__'):
binit=brains.__init__
if hasattr(binit,'im_func'): binit=binit.im_func
def __init__(self, data, parent, binit=binit):
Record.__init__(self,data)
binit(self.__of__(parent))

setattr(r, '__init__', __init__)

self._class=r
self._class = r

# OK, we've read meta data, now get line indexes

p=file.tell()
save=self._lines=array('i')
save=save.append
l=readline()
p = file.tell()
save = self._lines = array.array('i')
save = save.append
l = readline()
while l:
save(p)
p=p+len(l)
l=readline()
p += len(l)
l = readline()

def _searchable_result_columns(self):
return self.__items__

def _searchable_result_columns(self): return self.__items__
def names(self): return self._names
def data_dictionary(self): return self._data_dictionary
def names(self):
return self._names

def __len__(self): return len(self._lines)
def data_dictionary(self):
return self._data_dictionary

def __len__(self):
return len(self._lines)

def __getitem__(self,index):
if index==self._index: return self._row
file=self._file
if index == self._index:
return self._row
file = self._file
file.seek(self._lines[index])
line=file.readline()
line=line[:-1]
if line and line[-1:] in '\r\n': line=line[:-1]
fields=split(line,'\t')
l=len(fields)
nv=self._nv
line = file.readline()
line = line[:-1]
if line and line[-1:] in '\r\n':
line = line[:-1]
fields = [field.strip() for field in line.split('\t')]
l = len(fields)
nv = self._nv
if l != nv:
if l < nv:
fields=fields+['']*(nv-l)
fields = fields+['']*(nv-l)
else:
raise ValueError, (
"""The number of items in record %s is invalid
<pre>%s\n%s\n%s\n%s</pre>
"""
% (index, ('='*40), line, ('='*40), fields))
% (index, ('=' * 40), line, ('=' * 40), fields))
for i, parser in self._parsers:
try: v=parser(fields[i])
try:
v = parser(fields[i])
except:
if fields[i]:
raise ValueError, (
"""Invalid value, %s, for %s in record %s"""
% (fields[i], self._names[i], index))
else: v=MV
fields[i]=v

parent=self._parent
fields=self._class(fields, parent)
self._index=index
self._row=fields
if parent is None: return fields
return fields.__of__(parent)

File=DatabaseResults
else:
v = MV
fields[i] = v

parent = self._parent
record = self._class(fields, parent)
self._index = index
self._row = record
if parent is None:
return record
return record.__of__(parent)

File = DatabaseResults
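
To see the reworked pieces together, a minimal usage sketch (Python 2; the payload, MyBrain and Folder are hypothetical, and a brains mixin is passed explicitly, much as DA.py does):

from StringIO import StringIO

from Acquisition import Implicit
from ExtensionClass import Base
from Shared.DC.ZRDB.RDB import DatabaseResults


class MyBrain(Base):
    """Hypothetical brains mixin; 'name' is a column of the sample data."""

    def greeting(self):
        return 'Hello %s' % self.name


class Folder(Implicit):
    """Hypothetical acquisition parent for the returned records."""


# RDB payload: tab-separated column names, then column definitions
# (width plus type letter), then the data rows.
payload = StringIO(
    'name\tage\n'
    '20t\t4i\n'
    'Alice\t31\n'
    'Bob\t27\n'
)

results = DatabaseResults(payload, brains=MyBrain, parent=Folder())
print results.names()                           # prints: ['name', 'age']
print results.data_dictionary()['age']['type']  # prints: i
print len(results)                              # prints: 2

row = results[0]
print row.name, row.age                         # prints: Alice 31
print row.NAME                                  # prints: Alice (upper-case SQLAlias)
print row.greeting()                            # prints: Hello Alice (brains mixin)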
