
Add a blank line before and after functions (PEP8 E301).

E301: expected 1 blank line, found 0
1 parent 4d2350a · commit bfa8b2564e67b537105c54778ad8d453e87a4dc6 · cbrueffer committed with peterjc, Dec 5, 2012
Showing with 181 additions and 1 deletion.
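
For illustration only (this snippet is not taken from the diff below), the pattern repeated across every file is the same: pycodestyle raises E301 when a def inside a class or function body follows another statement with no separating blank line, and the fix is to insert exactly one blank line before the def.

# Hypothetical example of the E301 fix pattern (not from this commit).

# Before -- pycodestyle reports "E301 expected 1 blank line, found 0"
# on the add_entry line:
class Record:
    def __init__(self):
        self.entries = []
    def add_entry(self, entry):
        self.entries.append(entry)

# After -- a single blank line separates the methods, so E301 is no longer reported:
class Record:
    def __init__(self):
        self.entries = []

    def add_entry(self, entry):
        self.entries.append(entry)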
  1. +1 −0 Bio/Alphabet/__init__.py
  2. +4 −0 Bio/Application/__init__.py
  3. +3 −0 Bio/Blast/NCBIStandalone.py
  4. +9 −0 Bio/Blast/ParseBlastTable.py
  5. +2 −0 Bio/Blast/Record.py
  6. +1 −0 Bio/Data/CodonTable.py
  7. +1 −0 Bio/DocSQL.py
  8. +6 −0 Bio/Entrez/Parser.py
  9. +4 −0 Bio/ExPASy/ScanProsite.py
  10. +7 −1 Bio/FSSP/__init__.py
  11. +1 −0 Bio/FSSP/fssp_rec.py
  12. +1 −0 Bio/File.py
  13. +1 −0 Bio/GenBank/utils.py
  14. +3 −0 Bio/Index.py
  15. +7 −0 Bio/KEGG/Compound/__init__.py
  16. +17 −0 Bio/KEGG/Enzyme/__init__.py
  17. +2 −0 Bio/MarkovModel.py
  18. +2 −0 Bio/Nexus/Nexus.py
  19. +1 −0 Bio/Nexus/Trees.py
  20. +2 −0 Bio/ParserSupport.py
  21. +8 −0 Bio/Phylo/BaseTree.py
  22. +1 −0 Bio/Phylo/NewickIO.py
  23. +1 −0 Bio/Phylo/NexusIO.py
  24. +3 −0 Bio/Phylo/_utils.py
  25. +4 −0 Bio/PopGen/FDist/Utils.py
  26. +17 −0 Bio/PopGen/GenePop/Controller.py
  27. +3 −0 Bio/Restriction/_Update/RestrictionCompiler.py
  28. +2 −0 Bio/Search.py
  29. +2 −0 Bio/SearchIO/__init__.py
  30. +1 −0 Bio/Seq.py
  31. +6 −0 Bio/SeqFeature.py
  32. +2 −0 Bio/SeqIO/__init__.py
  33. +8 −0 Bio/_py3k/__init__.py
  34. +3 −0 Bio/pairwise2.py
  35. +1 −0 BioSQL/BioSeq.py
  36. +3 −0 Scripts/debug/debug_blast_parser.py
  37. +1 −0 Scripts/xbbtools/nextorf.py
  38. +1 −0 Tests/test_ColorSpiral.py
  39. +1 −0 Tests/test_Emboss.py
  40. +2 −0 Tests/test_PDB.py
  41. +2 −0 Tests/test_PDB_KDTree.py
  42. +6 −0 Tests/test_PhyloXML.py
  43. +6 −0 Tests/test_SearchIO_model.py
  44. +8 −0 Tests/test_SeqIO_FastaIO.py
  45. +2 −0 Tests/test_SeqIO_QualityIO.py
  46. +5 −0 Tests/test_SeqIO_convert.py
  47. +1 −0 Tests/test_SeqIO_features.py
  48. +3 −0 Tests/test_SffIO.py
  49. +2 −0 Tests/test_Tutorial.py
  50. +1 −0 setup.py
1 Bio/Alphabet/__init__.py
@@ -125,6 +125,7 @@ def __init__(self, alphabet, new_letters):
self.letters = alphabet.letters + new_letters
else:
self.letters = None
+
def __getattr__(self, key):
if key[:2] == "__" and key[-2:] == "__":
raise AttributeError(key)
4 Bio/Application/__init__.py
@@ -192,13 +192,17 @@ def __init__(self, cmd, **kwargs):
"an argument or property name due to the "
"way the AbstractCommandline class works"
% repr(name))
+
#Beware of binding-versus-assignment confusion issues
def getter(name):
return lambda x : x._get_parameter(name)
+
def setter(name):
return lambda x, value : x.set_parameter(name, value)
+
def deleter(name):
return lambda x : x._clear_parameter(name)
+
doc = p.description
if isinstance(p, _Switch):
doc += "\n\nThis property controls the addition of the %s " \
3 Bio/Blast/NCBIStandalone.py
@@ -1210,6 +1210,7 @@ def frame(self, line):
# line below modified by Yair Benita, Sep 2004
# Note that the colon is not always present. 2006
_query_re = re.compile(r"Query(:?) \s*(\d+)\s*(.+) (\d+)")
+
def query(self, line):
m = self._query_re.search(line)
if m is None:
@@ -1243,6 +1244,7 @@ def align(self, line):
# To match how we do the query, cache the regular expression.
# Note that the colon is not always present.
_sbjct_re = re.compile(r"Sbjct(:?) \s*(\d+)\s*(.+) (\d+)")
+
def sbjct(self, line):
m = self._sbjct_re.search(line)
if m is None:
@@ -2126,6 +2128,7 @@ def _security_check_parameters(param_dict):
class _BlastErrorConsumer(_BlastConsumer):
def __init__(self):
_BlastConsumer.__init__(self)
+
def noevent(self, line):
if 'Query must be at least wordsize' in line:
raise ShortQueryBlastError("Query must be at least wordsize")
9 Bio/Blast/ParseBlastTable.py
@@ -31,6 +31,7 @@ def __init__(self):
self.query = None
self.database = None
self.entries = []
+
def add_entry(self, entry):
self.entries.append(entry)
@@ -49,6 +50,7 @@ def __init__(self, handle):
self._lookahead = inline
self._n = 0
self._in_header = 1
+
def next(self):
self.table_record = BlastTableRec()
self._n += 1
@@ -73,28 +75,35 @@ def next(self):
def _consume_entry(self, inline):
current_entry = BlastTableEntry(inline)
self.table_record.add_entry(current_entry)
+
def _consume_header(self, inline):
for keyword in reader_keywords:
if keyword in inline:
in_header = self._Parse('_parse_%s' % reader_keywords[keyword],inline)
break
return in_header
+
def _parse_version(self, inline):
program, version, date = inline.split()[1:]
self.table_record.program = program
self.table_record.version = version
self.table_record.date = date
return 1
+
def _parse_iteration(self, inline):
self.table_record.iteration = int(inline.split()[2])
return 1
+
def _parse_query(self, inline):
self.table_record.query = inline.split()[2:]
return 1
+
def _parse_database(self, inline):
self.table_record.database = inline.split()[2]
return 1
+
def _parse_fields(self, inline):
return 0
+
def _Parse(self, method_name, inline):
return getattr(self,method_name)(inline)
2 Bio/Blast/Record.py
@@ -68,6 +68,7 @@ def __init__(self):
self.bits = None
self.e = None
self.num_alignments = None
+
def __str__(self):
return "%-66s %5s %s" % (self.title, self.score, self.e)
@@ -88,6 +89,7 @@ def __init__(self):
self.hit_def = ''
self.length = None
self.hsps = []
+
def __str__(self):
lines = self.title.split('\n')
lines.append("Length = %s\n" % self.length)
1 Bio/Data/CodonTable.py
@@ -46,6 +46,7 @@ class CodonTable(object):
back_table = {} # for back translations
start_codons = []
stop_codons = []
+
# Not always called from derived classes!
def __init__(self, nucleotide_alphabet = nucleotide_alphabet,
protein_alphabet = protein_alphabet,
1 Bio/DocSQL.py
@@ -140,6 +140,7 @@ def next(self):
class QuerySingle(Query, QueryRow):
ignore_warnings = 0
+
def __init__(self, *args, **keywds):
message = self.MSG_FAILURE
Query.__init__(self, *args, **keywds)
6 Bio/Entrez/Parser.py
@@ -98,11 +98,13 @@ def __init__(self, keys):
for key in keys:
dict.__setitem__(self, key, [])
self.listkeys = keys
+
def __setitem__(self, key, value):
if key in self.listkeys:
self[key].append(value)
else:
dict.__setitem__(self, key, value)
+
def __repr__(self):
text = dict.__repr__(self)
try:
@@ -115,13 +117,15 @@ def __repr__(self):
class NotXMLError(ValueError):
def __init__(self, message):
self.msg = message
+
def __str__(self):
return "Failed to parse the XML data (%s). Please make sure that the input data are in XML format." % self.msg
class CorruptedXMLError(ValueError):
def __init__(self, message):
self.msg = message
+
def __str__(self):
return "Failed to parse the XML data (%s). Please make sure that the input data are not corrupted." % self.msg
@@ -130,6 +134,7 @@ class ValidationError(ValueError):
"""Validating parsers raise this error if the parser finds a tag in the XML that is not defined in the DTD. Non-validating parsers do not raise this error. The Bio.Entrez.read and Bio.Entrez.parse functions use validating parsers by default (see those functions for more information)"""
def __init__(self, name):
self.name = name
+
def __str__(self):
return "Failed to find tag '%s' in the DTD. To skip all tags that are not represented in the DTD, please call Bio.Entrez.read or Bio.Entrez.parse with validate=False." % self.name
@@ -396,6 +401,7 @@ def elementDecl(self, name, model):
# The 'count' function is called recursively to make sure all the
# children in this model are counted. Error keys are ignored;
# they raise an exception in Python.
+
def count(model):
quantifier, name, children = model[1:]
if name is None:
4 Bio/ExPASy/ScanProsite.py
@@ -82,8 +82,10 @@ class ContentHandler(handler.ContentHandler):
"signature_ac",
"level",
"level_tag")
+
def __init__(self):
self.element = []
+
def startElement(self, name, attrs):
self.element.append(name)
self.content = ""
@@ -94,6 +96,7 @@ def startElement(self, name, attrs):
elif self.element==["matchset", "match"]:
match = {}
self.record.append(match)
+
def endElement(self, name):
assert name==self.element.pop()
name = str(name)
@@ -106,5 +109,6 @@ def endElement(self, name):
else:
# Unknown type, treat it as a string
match[name] = self.content
+
def characters(self, content):
self.content += content
8 Bio/FSSP/__init__.py
@@ -42,6 +42,7 @@ def __init__(self):
self.author = []
self.seqlength = 0
self.nalign = 0
+
def fill_header(self,inline):
for i in header_records:
if header_records[i].match(inline):
@@ -141,9 +142,11 @@ def __init__(self,in_fff_rec):
self.turn5 = in_fff_rec[fssp_rec.align.turn5]
self.pos_align_dict = {}
self.PosAlignList = []
+
def add_align_list(self,align_list):
for i in align_list:
self.PosAlignList.append(PosAlign(i))
+
def pos_align_list2dict(self):
j = 1
for i in self.PosAlignList:
@@ -159,20 +162,23 @@ def __init__(self):
self.pdb_res_dict = {}
self.abs_res_dict = {}
self.data = {}
+
def build_resnum_list(self):
for i in self:
self.abs_res_dict[self[i].abs_res_num] = i
self.pdb_res_dict[self[i].pdb_res_num] = i
+
# Given an absolute residue number & chain, returns the relevant fssp
# record
def abs(self,num):
return self[self.abs_res_dict[num]]
+
# Given an PDB residue number & chain, returns the relevant fssp
# record
def pdb(self,num):
return self[self.pdb_res_dict[num]]
- # Returns a sequence string
+ # Returns a sequence string
def sequence(self,num):
s = ''
sorted_pos_nums = self.abs_res_dict.keys()
1 Bio/FSSP/fssp_rec.py
@@ -2,6 +2,7 @@
class fff_rec:
def __init__(self,inrec=''):
self.data = inrec
+
def __repr__(self):
return str(self.data)
__str__ = __repr__
1 Bio/File.py
@@ -190,6 +190,7 @@ class MyParser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
self.data = ''
+
def handle_data(self, data):
self.data = self.data + data
1 Bio/GenBank/utils.py
@@ -21,6 +21,7 @@ class FeatureValueCleaner(object):
the case with /notes).
"""
keys_to_process = ["translation"]
+
def __init__(self, to_process = keys_to_process):
"""Initialize with the keys we should deal with.
"""
3 Bio/Index.py
@@ -101,12 +101,15 @@ def __init__(self, indexname, truncate=None):
def update(self, dict):
self.__changed = 1
dict.update(self, dict)
+
def __setitem__(self, key, value):
self.__changed = 1
dict.__setitem__(self, key, value)
+
def __delitem__(self, key):
self.__changed = 1
dict.__delitem__(self, key)
+
def clear(self):
self.__changed = 1
dict.clear(self)
7 Bio/KEGG/Compound/__init__.py
@@ -55,6 +55,7 @@ def __init__(self):
self.enzyme = []
self.structures = []
self.dblinks = []
+
def __str__(self):
"""__str__(self)
@@ -69,13 +70,16 @@ def __str__(self):
self._structures() + \
self._dblinks() + \
"///"
+
def _entry(self):
return _write_kegg("ENTRY",
[self.entry])
+
def _name(self):
return _write_kegg("NAME",
[_wrap_kegg(l, wrap_rule = name_wrap) \
for l in self.name])
+
def _formula(self):
return _write_kegg("FORMULA",
[self.formula])
@@ -91,6 +95,7 @@ def _pathway(self):
return _write_kegg("PATHWAY",
[_wrap_kegg(l, wrap_rule = id_wrap(16)) \
for l in s])
+
def _enzyme(self):
s = ""
for entry in self.enzyme:
@@ -101,13 +106,15 @@ def _enzyme(self):
s = s + t.ljust(16)
return _write_kegg("ENZYME",
[_wrap_kegg(s, wrap_rule = id_wrap(0))])
+
def _structures(self):
s = []
for entry in self.structures:
s.append(entry[0] + ": " + " ".join(entry[1]) + " ")
return _write_kegg("STRUCTURES",
[_wrap_kegg(l, wrap_rule = struct_wrap(5)) \
for l in s])
+
def _dblinks(self):
s = []
for entry in self.dblinks:
17 Bio/KEGG/Enzyme/__init__.py
@@ -74,6 +74,7 @@ def __init__(self):
self.disease = []
self.structures = []
self.dblinks = []
+
def __str__(self):
"""__str__(self)
@@ -96,76 +97,92 @@ def __str__(self):
self._structures() + \
self._dblinks() + \
"///"
+
def _entry(self):
return _write_kegg("ENTRY",
["EC " + self.entry])
+
def _name(self):
return _write_kegg("NAME",
[_wrap_kegg(l, wrap_rule = name_wrap) \
for l in self.name])
+
def _classname(self):
return _write_kegg("CLASS",
self.classname)
+
def _sysname(self):
return _write_kegg("SYSNAME",
[_wrap_kegg(l, wrap_rule = name_wrap) \
for l in self.sysname])
+
def _reaction(self):
return _write_kegg("REACTION",
[_wrap_kegg(l, wrap_rule = rxn_wrap) \
for l in self.reaction])
+
def _substrate(self):
return _write_kegg("SUBSTRATE",
[_wrap_kegg(l, wrap_rule = name_wrap) \
for l in self.substrate])
+
def _product(self):
return _write_kegg("PRODUCT",
[_wrap_kegg(l, wrap_rule = name_wrap) \
for l in self.product])
+
def _inhibitor(self):
return _write_kegg("INHIBITOR",
[_wrap_kegg(l, wrap_rule = name_wrap) \
for l in self.inhibitor])
+
def _cofactor(self):
return _write_kegg("COFACTOR",
[_wrap_kegg(l, wrap_rule = name_wrap) \
for l in self.cofactor])
+
def _effector(self):
return _write_kegg("EFFECTOR",
[_wrap_kegg(l, wrap_rule = name_wrap) \
for l in self.effector])
+
def _comment(self):
return _write_kegg("COMMENT",
[_wrap_kegg(l, wrap_rule = id_wrap(0)) \
for l in self.comment])
+
def _pathway(self):
s = []
for entry in self.pathway:
s.append(entry[0] + ": " + entry[1] + " " + entry[2])
return _write_kegg("PATHWAY",
[_wrap_kegg(l, wrap_rule = id_wrap(16)) \
for l in s])
+
def _genes(self):
s = []
for entry in self.genes:
s.append(entry[0] + ": " + " ".join(entry[1]))
return _write_kegg("GENES",
[_wrap_kegg(l, wrap_rule = id_wrap(5)) \
for l in s])
+
def _disease(self):
s = []
for entry in self.disease:
s.append(entry[0] + ": " + entry[1] + " " + entry[2])
return _write_kegg("DISEASE",
[_wrap_kegg(l, wrap_rule = id_wrap(13)) \
for l in s])
+
def _structures(self):
s = []
for entry in self.structures:
s.append(entry[0] + ": " + " ".join(entry[1]) + " ")
return _write_kegg("STRUCTURES",
[_wrap_kegg(l, wrap_rule = struct_wrap(5)) \
for l in s])
+
def _dblinks(self):
# This is a bit of a cheat that won't work if enzyme entries
# have more than one link id per db id. For now, that's not
2 Bio/MarkovModel.py
@@ -27,6 +27,7 @@
# this module.
import warnings
warnings.warn("For optimal speed, please update to Numpy version 1.3 or later (current version is %s)" % numpy.__version__)
+
def logaddexp(logx, logy):
if logy - logx > 100:
return logy
@@ -58,6 +59,7 @@ def __init__(self, states, alphabet,
self.p_initial = p_initial
self.p_transition = p_transition
self.p_emission = p_emission
+
def __str__(self):
import StringIO
handle = StringIO.StringIO()
2 Bio/Nexus/Nexus.py
@@ -558,9 +558,11 @@ def __init__(self, input=None):
def get_original_taxon_order(self):
"""Included for backwards compatibility (DEPRECATED)."""
return self.taxlabels
+
def set_original_taxon_order(self,value):
"""Included for backwards compatibility (DEPRECATED)."""
self.taxlabels=value
+
original_taxon_order=property(get_original_taxon_order,set_original_taxon_order)
def read(self,input):
1 Bio/Nexus/Trees.py
@@ -292,6 +292,7 @@ def is_preterminal(self,node):
return False not in [self.is_terminal(n) for n in self.node(node).succ]
else:
return False
+
def count_terminals(self,node=None):
"""Counts the number of terminal nodes that are attached to a node."""
if node is None:
2 Bio/ParserSupport.py
@@ -78,8 +78,10 @@ class AbstractConsumer(object):
"""
def _unhandled_section(self):
pass
+
def _unhandled(self, data):
pass
+
def __getattr__(self, attr):
if attr[:6] == 'start_' or attr[:4] == 'end_':
method = self._unhandled_section
8 Bio/Phylo/BaseTree.py
@@ -352,6 +352,7 @@ def get_path(self, target=None, **kwargs):
# Only one path will work -- ignore weights and visits
path = []
match = _combine_matchers(target, kwargs, True)
+
def check_in_path(v):
if match(v):
path.append(v)
@@ -363,6 +364,7 @@ def check_in_path(v):
path.append(v)
return True
return False
+
if not check_in_path(self.root):
return None
return path[-2::-1]
@@ -435,11 +437,13 @@ def depths(self, unit_branch_lengths=False):
else:
depth_of = lambda c: c.branch_length or 0
depths = {}
+
def update_depths(node, curr_depth):
depths[node] = curr_depth
for child in node.clades:
new_depth = curr_depth + depth_of(child)
update_depths(child, new_depth)
+
update_depths(self.root, self.root.branch_length or 0)
return depths
@@ -925,6 +929,7 @@ def __str__(self):
"""
TAB = ' '
textlines = []
+
def print_tree(obj, indent):
"""Recursively serialize sub-elements.
@@ -940,6 +945,7 @@ def print_tree(obj, indent):
for elem in child:
if isinstance(elem, TreeElement):
print_tree(elem, indent)
+
print_tree(self, 0)
return '\n'.join(textlines)
@@ -1106,8 +1112,10 @@ def from_hex(cls, hexstr):
hexstr.startswith('#') and
len(hexstr) == 7
), "need a 24-bit hexadecimal string, e.g. #000000"
+
def unpack(cc):
return int('0x'+cc, base=16)
+
RGB = hexstr[1:3], hexstr[3:5], hexstr[5:]
return cls(*map(unpack, RGB))
1 Bio/Phylo/NewickIO.py
@@ -179,6 +179,7 @@ def to_strings(self, confidence_as_branch_length=False,
make_info_string = self._info_factory(plain,
confidence_as_branch_length, branch_length_only, max_confidence,
format_confidence, format_branch_length)
+
def newickize(clade):
"""Convert a node tree to a Newick tree string, recursively."""
if clade.is_terminal(): #terminal
1 Bio/Phylo/NexusIO.py
@@ -37,6 +37,7 @@ def parse(handle):
eventually change Nexus to use the new NewickIO parser directly.)
"""
nex = Nexus.Nexus(handle)
+
# NB: Once Nexus.Trees is modified to use Tree.Newick objects, do this:
# return iter(nex.trees)
# Until then, convert the Nexus.Trees.Tree object hierarchy:
3 Bio/Phylo/_utils.py
@@ -209,12 +209,14 @@ def get_col_positions(tree):
def get_row_positions(tree):
positions = dict((taxon, 2*idx) for idx, taxon in enumerate(taxa))
+
def calc_row(clade):
for subclade in clade:
if subclade not in positions:
calc_row(subclade)
positions[clade] = (positions[clade.clades[0]] +
positions[clade.clades[-1]]) / 2
+
calc_row(tree.root)
return positions
@@ -347,6 +349,7 @@ def get_y_positions(tree):
# Rows are defined by the tips
heights = dict((tip, maxheight - i)
for i, tip in enumerate(reversed(tree.get_terminals())))
+
# Internal nodes: place at midpoint of children
def calc_row(clade):
for subclade in clade:
4 Bio/PopGen/FDist/Utils.py
@@ -86,11 +86,13 @@ def _convert_genepop_to_fdist_big(gp_rec, report_pops = None):
pops = []
work_rec = FileParser.read(gp_rec.fname)
lParser = work_rec.get_individual()
+
def init_pop():
my_pop = []
for i in range(num_loci):
my_pop.append({})
return my_pop
+
curr_pop = init_pop()
num_pops = 1
if report_pops:
@@ -166,11 +168,13 @@ def countPops(rec):
lParser = work_rec.get_individual()
#here we go again (necessary...)
alleles.sort()
+
def process_pop(pop_data, alleles, allele_counts):
allele_array = [] #We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
+
lParser = work_rec2.get_individual()
allele_counts = {}
for allele in alleles:
17 Bio/PopGen/GenePop/Controller.py
@@ -272,8 +272,10 @@ def _test_pop_hz_both(self, fname, type, ext, enum_test = True,
opts = self._get_opts(dememorization, batches, iterations, enum_test)
self._run_genepop([ext], [1, type], fname, opts)
f = open(fname + ext)
+
def hw_func(self):
return _hw_func(self.stream, False)
+
return _FileIterator(hw_func, f, fname + ext)
def _test_global_hz_both(self, fname, type, ext, enum_test = True,
@@ -294,8 +296,10 @@ def _test_global_hz_both(self, fname, type, ext, enum_test = True,
"""
opts = self._get_opts(dememorization, batches, iterations, enum_test)
self._run_genepop([ext], [1, type], fname, opts)
+
def hw_pop_func(self):
return _read_table(self.stream, [str, _gp_float, _gp_float, _gp_float])
+
f1 = open(fname + ext)
l = f1.readline()
while "by population" not in l:
@@ -367,10 +371,13 @@ def test_pop_hz_prob(self, fname, ext, enum_test = False,
"""
opts = self._get_opts(dememorization, batches, iterations, enum_test)
self._run_genepop([ext], [1, 3], fname, opts)
+
def hw_prob_loci_func(self):
return _hw_func(self.stream, True, True)
+
def hw_prob_pop_func(self):
return _hw_func(self.stream, False, True)
+
shutil.copyfile(fname+".P", fname+".P2")
f1 = open(fname + ".P")
f2 = open(fname + ".P2")
@@ -420,6 +427,7 @@ def test_ld(self, fname,
dememorization = 10000, batches = 20, iterations = 5000):
opts = self._get_opts(dememorization, batches, iterations)
self._run_genepop([".DIS"], [2, 1], fname, opts)
+
def ld_pop_func(self):
current_pop = None
l = self.stream.readline().rstrip()
@@ -437,6 +445,7 @@ def ld_pop_func(self):
return current_pop, pop, (locus1, locus2), None
p, se, switches = _gp_float(toks[3]), _gp_float(toks[4]), _gp_int(toks[5])
return current_pop, pop, (locus1, locus2), (p, se, switches)
+
def ld_func(self):
l = self.stream.readline().rstrip()
if l == "":
@@ -449,6 +458,7 @@ def ld_func(self):
except ValueError:
return (locus1, locus2), None
return (locus1, locus2), (chi2, df, p)
+
f1 = open(fname + ".DIS")
l = f1.readline()
while "----" not in l:
@@ -560,6 +570,7 @@ def calc_allele_genotype_freqs(self, fname):
# num_loci = _gp_int(m.group(1))
# l = f.readline()
#f.close()
+
def pop_parser(self):
if hasattr(self, "old_line"):
l = self.old_line
@@ -638,6 +649,7 @@ def pop_parser(self):
freq_fis, overall_fis
self.done = True
raise StopIteration
+
def locus_parser(self):
l = self.stream.readline()
while l != "":
@@ -669,6 +681,7 @@ def _calc_diversities_fis(self, fname, ext):
avg_Qintra = _read_table(f, [str, _gp_float])
l = f.readline()
f.close()
+
def fis_func(self):
l = self.stream.readline()
while l != "":
@@ -688,6 +701,7 @@ def fis_func(self):
l = self.stream.readline()
self.done = True
raise StopIteration
+
dvf = open(fname + ext)
return _FileIterator(fis_func, dvf, fname + ext), avg_fis, avg_Qintra
@@ -736,6 +750,7 @@ def calc_fst_all(self, fname):
l = f.readline()
f.close()
f = open(fname + ".FST")
+
def proc(self):
if hasattr(self, "last_line"):
l = self.last_line
@@ -786,6 +801,7 @@ def calc_fst_pair(self, fname):
avg_fst = _read_headed_triangle_matrix(f)
l = f.readline()
f.close()
+
def loci_func(self):
l = self.stream.readline()
while l != "":
@@ -798,6 +814,7 @@ def loci_func(self):
l = self.stream.readline()
self.done = True
raise StopIteration
+
stf = open(fname + ".ST2")
os.remove(fname + ".MIG")
return _FileIterator(loci_func, stf, fname + ".ST2"), avg_fst
3 Bio/Restriction/_Update/RestrictionCompiler.py
@@ -333,11 +333,14 @@ def buildtype(self):
'ovhg':None,'ovhgseq':None})
elif t == OneCut:
dct.update({'scd5':None, 'scd3':None})
+
class klass(type):
def __new__(cls):
return type.__new__(cls, 'type%i'%n,ty,dct)
+
def __init__(cls):
super(klass, cls).__init__('type%i'%n,ty,dct)
+
yield klass()
n+=1
2 Bio/Search.py
@@ -83,8 +83,10 @@ def __init__(self, length, identical, positives, gaps):
self.identical = identical
self.positives = positives
self.gaps = gaps
+
def __len__(self):
return self.length
+
def __getattr__(self, name):
if name == "frac_identical":
return float(self.identical) / self.length
2 Bio/SearchIO/__init__.py
@@ -550,12 +550,14 @@ def index_db(index_filename, filenames=None, format=None,
from Bio.File import _SQLiteManySeqFilesDict
repr = "SearchIO.index_db(%r, filenames=%r, format=%r, key_function=%r, ...)" \
% (index_filename, filenames, format, key_function)
+
def proxy_factory(format, filename=None):
"""Given a filename returns proxy object, else boolean if format OK."""
if filename:
return get_processor(format, _INDEXER_MAP)(filename, **kwargs)
else:
return format in _INDEXER_MAP
+
return _SQLiteManySeqFilesDict(index_filename, filenames,
proxy_factory, format,
key_function, repr)
1 Bio/Seq.py
@@ -146,6 +146,7 @@ def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._data),
repr(self.alphabet))
+
def __str__(self):
"""Returns the full sequence as a python string, use str(my_seq).
6 Bio/SeqFeature.py
@@ -153,6 +153,7 @@ def __init__(self, location = None, type = '', location_operator = '',
def _get_strand(self):
return self.location.strand
+
def _set_strand(self, value):
try:
self.location.strand = value
@@ -162,6 +163,7 @@ def _set_strand(self, value):
raise ValueError("Can't set strand without a location.")
else:
raise
+
strand = property(fget = _get_strand, fset = _set_strand,
doc = """Feature's strand
@@ -170,6 +172,7 @@ def _set_strand(self, value):
def _get_ref(self):
return self.location.ref
+
def _set_ref(self, value):
try:
self.location.ref = value
@@ -187,6 +190,7 @@ def _set_ref(self, value):
def _get_ref_db(self):
return self.location.ref_db
+
def _set_ref_db(self, value):
self.location.ref_db = value
ref_db = property(fget = _get_ref_db, fset = _set_ref_db,
@@ -619,11 +623,13 @@ def __init__(self, start, end, strand=None, ref=None, ref_db=None):
def _get_strand(self):
return self._strand
+
def _set_strand(self, value):
if value not in [+1, -1, 0, None]:
raise ValueError("Strand should be +1, -1, 0 or None, not %r" \
% value)
self._strand = value
+
strand = property(fget = _get_strand, fset = _set_strand,
doc = "Strand of the location (+1, -1, 0 or None).")
2 Bio/SeqIO/__init__.py
@@ -878,12 +878,14 @@ def index_db(index_filename, filenames=None, format=None, alphabet=None,
from Bio.File import _SQLiteManySeqFilesDict
repr = "SeqIO.index_db(%r, filenames=%r, format=%r, alphabet=%r, key_function=%r)" \
% (index_filename, filenames, format, alphabet, key_function)
+
def proxy_factory(format, filename=None):
"""Given a filename returns proxy object, else boolean if format OK."""
if filename:
return _FormatToRandomAccess[format](filename, format, alphabet)
else:
return format in _FormatToRandomAccess
+
return _SQLiteManySeqFilesDict(index_filename, filenames,
proxy_factory, format,
key_function, repr)
8 Bio/_py3k/__init__.py
@@ -43,6 +43,7 @@ def _is_int_or_long(i):
return isinstance(i, int)
import io
+
def _binary_to_string_handle(handle):
"""Treat a binary (bytes) handle like a text (unicode) handle."""
#See also http://bugs.python.org/issue5628
@@ -54,19 +55,26 @@ def _binary_to_string_handle(handle):
class EvilHandleHack(object):
def __init__(self, handle):
self._handle = handle
+
def read(self, length=None):
return _as_string(self._handle.read(length))
+
def readline(self):
return _as_string(self._handle.readline())
+
def __iter__(self):
for line in self._handle:
yield _as_string(line)
+
def close(self):
return self._handle.close()
+
def seek(self, pos):
return self._handle.seek(pos)
+
def tell(self):
return self._handle.tell(pos)
+
return EvilHandleHack(handle)
#On Python 3, can depend on OrderedDict being present:
3 Bio/pairwise2.py
@@ -764,6 +764,7 @@ class identity_match:
def __init__(self, match=1, mismatch=0):
self.match = match
self.mismatch = mismatch
+
def __call__(self, charA, charB):
if charA == charB:
return self.match
@@ -783,6 +784,7 @@ class dictionary_match:
def __init__(self, score_dict, symmetric=1):
self.score_dict = score_dict
self.symmetric = symmetric
+
def __call__(self, charA, charB):
if self.symmetric and (charA, charB) not in self.score_dict:
# If the score dictionary is symmetric, then look up the
@@ -801,6 +803,7 @@ def __init__(self, open, extend, penalize_extend_when_opening=0):
raise ValueError("Gap penalties should be non-positive.")
self.open, self.extend = open, extend
self.penalize_extend_when_opening = penalize_extend_when_opening
+
def __call__(self, index, length):
return calc_affine_penalty(
length, self.open, self.extend, self.penalize_extend_when_opening)
1 BioSQL/BioSeq.py
@@ -105,6 +105,7 @@ def tostring(self):
return self.adaptor.get_subseq_as_string(self.primary_id,
self.start,
self.start + self._length)
+
def __str__(self):
"""Returns the full sequence as a python string."""
return self.adaptor.get_subseq_as_string(self.primary_id,
3 Scripts/debug/debug_blast_parser.py
@@ -41,11 +41,14 @@ def __init__(self, decorated=None):
decorated = ParserSupport.AbstractConsumer()
self.decorated = decorated
self._prev_attr = None
+
def _decorated_section(self):
getattr(self.decorated, self._prev_attr)()
+
def _decorated(self, data):
getattr(self.decorated, self._prev_attr)(data)
self.linenum += 1
+
def __getattr__(self, attr):
self._prev_attr = attr
if attr.startswith('start_') or attr.startswith('end_'):
1 Scripts/xbbtools/nextorf.py
@@ -30,6 +30,7 @@ class ProteinX(Alphabet.ProteinAlphabet):
class MissingTable:
def __init__(self, table):
self._table = table
+
def get(self, codon, stop_symbol):
try:
return self._table.get(codon, stop_symbol)
1 Tests/test_ColorSpiral.py
@@ -13,6 +13,7 @@
except ImportError:
#This was added in Python 2.6, fallback for Python 2.5:
from math import sin, cos
+
def rect(r, phi):
return r * (cos(phi) + sin(phi)*1j)
1 Tests/test_Emboss.py
@@ -321,6 +321,7 @@ def test_pir(self):
#Skip GenBank, EMBOSS 6.0.1 on Windows won't output proteins as GenBank
self.check_SeqIO_with_EMBOSS("NBRF/DMB_prot.pir", "pir",
skip_formats=["embl","genbank"])
+
def test_clustalw(self):
"""SeqIO & EMBOSS reading each other's conversions of a Clustalw file."""
self.check_SeqIO_with_EMBOSS("Clustalw/hedgehog.aln", "clustal",
2 Tests/test_PDB.py
@@ -49,8 +49,10 @@ def test_1_warnings(self):
# Equivalent to warnings.catch_warnings -- hackmagic
orig_showwarning = warnings.showwarning
all_warns = []
+
def showwarning(*args, **kwargs):
all_warns.append(args[0])
+
warnings.showwarning = showwarning
# Trigger warnings
p = PDBParser(PERMISSIVE=True)
2 Tests/test_PDB_KDTree.py
@@ -37,8 +37,10 @@ def test_neighbor_search(self):
class RandomAtom:
def __init__(self):
self.coord = 100 * random(3)
+
def get_coord(self):
return self.coord
+
for i in range(0, 20):
atoms = [RandomAtom() for j in range(100)]
ns = NeighborSearch(atoms)
6 Tests/test_PhyloXML.py
@@ -39,11 +39,13 @@ def _test_read_factory(source, count):
phylogenies under the root.
"""
fname = os.path.basename(source)
+
def test_read(self):
phx = PhyloXMLIO.read(source)
self.assertTrue(phx)
self.assertEqual(len(phx), count[0])
self.assertEqual(len(phx.other), count[1])
+
test_read.__doc__ = "Read %s to produce a phyloXML object." % fname
return test_read
@@ -55,9 +57,11 @@ def _test_parse_factory(source, count):
function and counts the total number of trees extracted.
"""
fname = os.path.basename(source)
+
def test_parse(self):
trees = PhyloXMLIO.parse(source)
self.assertEqual(len(list(trees)), count)
+
test_parse.__doc__ = "Parse the phylogenies in %s." % fname
return test_parse
@@ -69,6 +73,7 @@ def _test_shape_factory(source, shapes):
clades deep.
"""
fname = os.path.basename(source)
+
def test_shape(self):
trees = PhyloXMLIO.parse(source)
for tree, shape_expect in zip(trees, shapes):
@@ -77,6 +82,7 @@ def test_shape(self):
self.assertEqual(len(clade), sub_expect[0])
for subclade, len_expect in zip(clade, sub_expect[1]):
self.assertEqual(len(subclade), len_expect)
+
test_shape.__doc__ = "Check the branching structure of %s." % fname
return test_shape
6 Tests/test_SearchIO_model.py
@@ -352,9 +352,11 @@ def test_hit_map(self):
# deepcopy the qresult since we'll change the objects within
qresult = deepcopy(self.qresult)
# map func: capitalize hit IDs
+
def map_func(hit):
hit.id = hit.id.upper()
return hit
+
# test before mapping
self.assertEqual('hit1', qresult[0].id)
self.assertEqual('hit2', qresult[1].id)
@@ -426,10 +428,12 @@ def test_hsp_map(self):
for hit in qresult:
for hsp in hit:
setattr(hsp, 'mock', 13)
+
# map func: remove first letter of all HSP.aln
def map_func(hsp):
mapped_frags = [x[1:] for x in hsp]
return HSP(mapped_frags)
+
mapped = qresult.hsp_map(map_func)
# make sure old hsp attributes is not transferred to mapped hsps
for hit in mapped:
@@ -770,10 +774,12 @@ def test_map(self):
# apply mock attributes to hsp, for testing mapped hsp attributes
for hsp in hit:
setattr(hsp, 'mock', 13)
+
# map func: remove first letter of all HSP.alignment
def map_func(hsp):
mapped_frags = [x[1:] for x in hsp]
return HSP(mapped_frags)
+
mapped = hit.map(map_func)
# make sure old hsp attributes is not transferred to mapped hsps
for hsp in mapped:
8 Tests/test_SeqIO_FastaIO.py
@@ -126,37 +126,45 @@ def test_no_name(self):
for filename in single_nucleic_files:
name = filename.split(".")[0]
+
def funct(fn):
f = lambda x : x.simple_check(fn, generic_nucleotide)
f.__doc__ = "Checking nucleotide file %s" % fn
return f
+
setattr(TitleFunctions, "test_nuc_%s"%name, funct(filename))
del funct
for filename in multi_dna_files:
name = filename.split(".")[0]
+
def funct(fn):
f = lambda x : x.multi_check(fn, generic_dna)
f.__doc__ = "Checking multi DNA file %s" % fn
return f
+
setattr(TitleFunctions, "test_mutli_dna_%s"%name, funct(filename))
del funct
for filename in single_amino_files:
name = filename.split(".")[0]
+
def funct(fn):
f = lambda x : x.simple_check(fn, generic_nucleotide)
f.__doc__ = "Checking protein file %s" % fn
return f
+
setattr(TitleFunctions, "test_pro_%s"%name, funct(filename))
del funct
for filename in multi_amino_files:
name = filename.split(".")[0]
+
def funct(fn):
f = lambda x : x.multi_check(fn, generic_dna)
f.__doc__ = "Checking multi protein file %s" % fn
return f
+
setattr(TitleFunctions, "test_mutli_pro_%s"%name, funct(filename))
del funct
2 Tests/test_SeqIO_QualityIO.py
@@ -303,10 +303,12 @@ def simple_check(self, base_name, in_variant):
("misc_rna", "sanger")]
for base_name, variant in tests:
assert variant in ["sanger", "solexa", "illumina"]
+
def funct(bn,var):
f = lambda x : x.simple_check(bn,var)
f.__doc__ = "Reference conversions of %s file %s" % (var, bn)
return f
+
setattr(TestReferenceFastqConversions, "test_%s_%s" % (base_name, variant),
funct(base_name, variant))
del funct
5 Tests/test_SeqIO_convert.py
@@ -160,6 +160,7 @@ class ConvertTests(unittest.TestCase):
"""Cunning unit test where methods are added at run time."""
def simple_check(self, filename, in_format, out_format, alphabet):
check_convert(filename, in_format, out_format, alphabet)
+
def failure_check(self, filename, in_format, out_format, alphabet):
check_convert_fails(filename, in_format, out_format, alphabet)
@@ -180,10 +181,12 @@ def failure_check(self, filename, in_format, out_format, alphabet):
for (in_format, out_format) in converter_dict:
if in_format != format:
continue
+
def funct(fn,fmt1, fmt2, alpha):
f = lambda x : x.simple_check(fn, fmt1, fmt2, alpha)
f.__doc__ = "Convert %s from %s to %s" % (fn, fmt1, fmt2)
return f
+
setattr(ConvertTests, "test_%s_%s_to_%s" \
% (filename.replace("/","_").replace(".","_"), in_format, out_format),
funct(filename, in_format, out_format, alphabet))
@@ -223,10 +226,12 @@ def funct(fn,fmt1, fmt2, alpha):
#TODO? These conversions don't check for bad characters in the quality,
#and in order to pass this strict test they should.
continue
+
def funct(fn,fmt1, fmt2, alpha):
f = lambda x : x.failure_check(fn, fmt1, fmt2, alpha)
f.__doc__ = "Convert %s from %s to %s" % (fn, fmt1, fmt2)
return f
+
setattr(ConvertTests, "test_%s_%s_to_%s" \
% (filename.replace("/","_").replace(".","_"), in_format, out_format),
funct(filename, in_format, out_format, alphabet))
1 Tests/test_SeqIO_features.py
@@ -456,6 +456,7 @@ class FeatureWriting(unittest.TestCase):
def setUp(self):
self.record = SeqRecord(Seq("ACGT"*100, generic_dna),
id="Test", name="Test", description="Test")
+
def write_read_check(self, format):
handle = StringIO()
SeqIO.write([self.record], handle, format)
3 Tests/test_SffIO.py
@@ -63,12 +63,15 @@ def setUp(self):
elif 'XY' in line:
x, y = map(int, fields[-1].split('_'))
self.test_annotations[current_name]["coords"] = (x, y)
+
def test_time(self):
for record in self.records:
self.assertEqual(record.annotations["time"], self.test_annotations[record.name]["time"])
+
def test_region(self):
for record in self.records:
self.assertEqual(record.annotations["region"], self.test_annotations[record.name]["region"])
+
def test_coords(self):
for record in self.records:
self.assertEqual(record.annotations["coords"], self.test_annotations[record.name]["coords"])
2 Tests/test_Tutorial.py
@@ -84,6 +84,7 @@ class TutorialDocTestHolder(object):
for name, example, folder in extract_doctests(tutorial):
if sys.version_info[0] >= 3:
example = rt.refactor_docstring(example, name)
+
def funct(n, d, f):
global tutorial_base
method = lambda x : None
@@ -95,6 +96,7 @@ def funct(n, d, f):
method.__doc__ = "%s\n\n%s\n" % (n, d)
method._folder = f
return method
+
setattr(TutorialDocTestHolder,
"doctest_%s" % name.replace(" ","_"),
funct(name, example, folder))
1 setup.py
@@ -192,6 +192,7 @@ class install_biopython(install):
boolean_options = install.boolean_options + [
'single-version-externally-managed',
]
+
def initialize_options(self):
install.initialize_options(self)
self.single_version_externally_managed = None
