Different PEP8 fixes, mostly spacing around inline comments (E261, E262).
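For readers who do not have the pycodestyle codes memorised: E261 requires at least two spaces before an inline comment, and E262 requires the comment text to start with a single '#' followed by a space. A minimal illustrative snippet, not taken from this commit:

# Violates E261 (fewer than two spaces before the '#') and E262 (no space after the '#'):
x = 1 #comment
# Violates E262 (inline comments should start with '# ', not '##'):
y = 2  ## comment
# Satisfies both E261 and E262:
z = 3  # comment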
commit f57bacee36c57d4e3834faa5e68159a29a9bf658 1 parent 7585a62
@cbrueffer authored
Showing with 201 additions and 198 deletions.
  1. +1 −1  Bio/Align/AlignInfo.py
  2. +1 −1  Bio/Align/Applications/_Clustalw.py
  3. +1 −1  Bio/Align/Applications/_Dialign.py
  4. +1 −1  Bio/Align/Applications/_Mafft.py
  5. +1 −1  Bio/Align/Applications/_Muscle.py
  6. +1 −1  Bio/Align/Applications/_Prank.py
  7. +1 −1  Bio/Align/Applications/_Probcons.py
  8. +2 −2 Bio/Align/Applications/_TCoffee.py
  9. +1 −1  Bio/Align/Applications/__init__.py
  10. +1 −1  Bio/Align/Generic.py
  11. +2 −2 Bio/Align/__init__.py
  12. +2 −2 Bio/AlignIO/ClustalIO.py
  13. +1 −1  Bio/AlignIO/EmbossIO.py
  14. +2 −2 Bio/AlignIO/FastaIO.py
  15. +2 −2 Bio/AlignIO/NexusIO.py
  16. +3 −3 Bio/AlignIO/PhylipIO.py
  17. +7 −7 Bio/AlignIO/StockholmIO.py
  18. +4 −4 Bio/AlignIO/__init__.py
  19. +3 −3 Bio/Application/__init__.py
  20. +1 −1  Bio/Blast/Applications.py
  21. +1 −1  Bio/Blast/NCBIXML.py
  22. +2 −2 Bio/Blast/Record.py
  23. +2 −2 Bio/Data/CodonTable.py
  24. +1 −1  Bio/Entrez/__init__.py
  25. +4 −4 Bio/FSSP/__init__.py
  26. +3 −3 Bio/GA/Organism.py
  27. +5 −5 Bio/GenBank/__init__.py
  28. +3 −3 Bio/Graphics/BasicChromosome.py
  29. +4 −4 Bio/Graphics/GenomeDiagram/_AbstractDrawer.py
  30. +12 −12 Bio/Graphics/GenomeDiagram/_CircularDrawer.py
  31. +2 −2 Bio/Graphics/GenomeDiagram/_Diagram.py
  32. +8 −8 Bio/Graphics/GenomeDiagram/_LinearDrawer.py
  33. +20 −20 Bio/Motif/_Motif.py
  34. +3 −3 Bio/Motif/__init__.py
  35. +3 −3 Bio/Nexus/Nexus.py
  36. +2 −2 Bio/Nexus/Trees.py
  37. +1 −1  Bio/Phylo/NewickIO.py
  38. +1 −1  Bio/Phylo/PAML/baseml.py
  39. +1 −1  Bio/Phylo/PAML/codeml.py
  40. +1 −1  Bio/Phylo/PAML/yn00.py
  41. +1 −1  Bio/PopGen/FDist/Async.py
  42. +4 −4 Bio/PopGen/FDist/Utils.py
  43. +1 −1  Bio/PopGen/FDist/__init__.py
  44. +1 −1  Bio/PopGen/GenePop/Controller.py
  45. +2 −2 Bio/PopGen/GenePop/EasyController.py
  46. +2 −2 Bio/PopGen/GenePop/LargeFileParser.py
  47. +2 −2 Bio/PopGen/GenePop/__init__.py
  48. +1 −1  Bio/PopGen/SimCoal/Cache.py
  49. +1 −1  Bio/PopGen/SimCoal/Controller.py
  50. +12 −12 Bio/Restriction/_Update/RestrictionCompiler.py
  51. +1 −1  Bio/SCOP/Raf.py
  52. +2 −2 Bio/SCOP/__init__.py
  53. +6 −6 Bio/Seq.py
  54. +12 −9 Bio/SeqIO/UniprotIO.py
  55. +2 −2 Bio/SeqRecord.py
  56. +4 −4 Bio/SeqUtils/MeltingTemp.py
  57. +2 −2 Bio/SeqUtils/ProtParam.py
  58. +1 −1  Bio/SwissProt/__init__.py
  59. +3 −3 Bio/TogoWS/__init__.py
  60. +1 −1  Bio/__init__.py
  61. +11 −11 Bio/bgzf.py
  62. +3 −3 BioSQL/BioSeq.py
  63. +2 −2 Doc/examples/ACT_example.py
  64. +4 −4 Doc/examples/Proux_et_al_2002_Figure_6.py
  65. +3 −3 Scripts/SeqGui/SeqGui.py
2  Bio/Align/AlignInfo.py
@@ -565,7 +565,7 @@ def _get_column_info_content(self, obs_freq, e_freq_table, log_base,
except AttributeError:
#The alphabet doesn't declare a gap - there could be none
#in the sequence... or just a vague alphabet.
- gap_char = "-" #Safe?
+ gap_char = "-" # Safe?
if e_freq_table:
if not isinstance(e_freq_table, FreqTable.FreqTable):
2  Bio/Align/Applications/_Clustalw.py
@@ -5,7 +5,7 @@
"""Command line wrapper for the multiple alignment program Clustal W.
"""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
import os
from Bio.Application import _Option, _Switch, AbstractCommandline
2  Bio/Align/Applications/_Dialign.py
@@ -5,7 +5,7 @@
"""Command line wrapper for the multiple alignment program DIALIGN2-2.
"""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Option, _Argument, _Switch, AbstractCommandline
2  Bio/Align/Applications/_Mafft.py
@@ -5,7 +5,7 @@
"""Command line wrapper for the multiple alignment programme MAFFT.
"""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
import os
from Bio.Application import _Option, _Switch, _Argument, AbstractCommandline
2  Bio/Align/Applications/_Muscle.py
@@ -5,7 +5,7 @@
"""Command line wrapper for the multiple alignment program MUSCLE.
"""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Option, _Switch, AbstractCommandline
2  Bio/Align/Applications/_Prank.py
@@ -5,7 +5,7 @@
"""Command line wrapper for the multiple alignment program PRANK.
"""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Option, _Switch, AbstractCommandline
2  Bio/Align/Applications/_Probcons.py
@@ -5,7 +5,7 @@
"""Command line wrapper for the multiple alignment program PROBCONS.
"""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Option, _Switch, _Argument, AbstractCommandline
4 Bio/Align/Applications/_TCoffee.py
@@ -5,7 +5,7 @@
"""Command line wrapper for the multiple alignment program TCOFFEE.
"""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Option, _Switch, AbstractCommandline
@@ -53,7 +53,7 @@ def __init__(self, cmd="t_coffee", **kwargs):
Note that of these Biopython's AlignIO module will only
read clustalw, pir, and fasta.
- """, #TODO - Can we read the PHYLIP output?
+ """, # TODO - Can we read the PHYLIP output?
equate=False),
_Option(["-infile", "infile"],
"Specify the input file.",
2  Bio/Align/Applications/__init__.py
@@ -4,7 +4,7 @@
# as part of this package.
"""Alignment command line tool wrappers."""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
from _Muscle import MuscleCommandline
from _Clustalw import ClustalwCommandline
2  Bio/Align/Generic.py
@@ -12,7 +12,7 @@
Classes:
- Alignment
"""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
# biopython
from Bio.Seq import Seq
4 Bio/Align/__init__.py
@@ -9,7 +9,7 @@
class, used in the Bio.AlignIO module.
"""
-__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
@@ -218,7 +218,7 @@ def extend(self, records):
expected_length = self.get_alignment_length()
else:
#Take the first record's length
- records = iter(records) #records arg could be list or iterator
+ records = iter(records) # records arg could be list or iterator
try:
rec = records.next()
except StopIteration:
4 Bio/AlignIO/ClustalIO.py
@@ -122,7 +122,7 @@ def next(self):
ids = []
seqs = []
consensus = ""
- seq_cols = None #: Used to extract the consensus
+ seq_cols = None # Used to extract the consensus
#Use the first block to get the sequence identifiers
while True:
@@ -171,7 +171,7 @@ def next(self):
break
line = handle.readline()
if not line:
- break #end of file
+ break # end of file
assert line.strip() == ""
assert seq_cols is not None
2  Bio/AlignIO/EmbossIO.py
@@ -147,7 +147,7 @@ def next(self):
end = int(end)
else:
assert seq.replace("-","") != ""
- start = int(start)-1 #python counting
+ start = int(start) - 1 # python counting
end = int(end)
#The identifier is truncated...
4 Bio/AlignIO/FastaIO.py
@@ -109,8 +109,8 @@ def build_hsp():
assert query_tags, query_tags
assert match_tags, match_tags
evalue = align_tags.get("fa_expect", None)
- q = "?" #Just for printing len(q) in debug below
- m = "?" #Just for printing len(m) in debug below
+ q = "?" # Just for printing len(q) in debug below
+ m = "?" # Just for printing len(m) in debug below
tool = global_tags.get("tool", "").upper()
try:
q = _extract_alignment_region(query_seq, query_tags)
4 Bio/AlignIO/NexusIO.py
@@ -73,7 +73,7 @@ def write_file(self, alignments):
alignments - A list or iterator returning MultipleSeqAlignment objects.
This should hold ONE and only one alignment.
"""
- align_iter = iter(alignments) #Could have been a list
+ align_iter = iter(alignments) # Could have been a list
try:
first_alignment = align_iter.next()
except StopIteration:
@@ -92,7 +92,7 @@ def write_file(self, alignments):
#Good. Actually write the single alignment,
self.write_alignment(first_alignment)
- return 1 #we only support writing one alignment!
+ return 1 # we only support writing one alignment!
def write_alignment(self, alignment):
#Creates an empty Nexus object, adds the sequences,
6 Bio/AlignIO/PhylipIO.py
@@ -232,9 +232,9 @@ def next(self):
while ""==line.strip():
line = handle.readline()
if not line:
- break #end of file
+ break # end of file
if not line:
- break #end of file
+ break # end of file
if self._is_header(line):
#Looks like the start of a concatenated alignment
@@ -251,7 +251,7 @@ def next(self):
if (not line) and i+1 < number_of_seqs:
raise ValueError("End of file mid-block")
if not line:
- break #end of file
+ break # end of file
records = (SeqRecord(Seq("".join(s), self.alphabet),
id=i, name=i, description=i)
14 Bio/AlignIO/StockholmIO.py
@@ -129,7 +129,7 @@
>>> print sub_record.letter_annotations['secondary_structure']
-------<<<
"""
-__docformat__ = "epytext en" #not just plaintext
+__docformat__ = "epytext en" # not just plaintext
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
@@ -338,8 +338,8 @@ def next(self):
while 1:
line = self.handle.readline()
if not line:
- break #end of file
- line = line.strip() #remove trailing \n
+ break # end of file
+ line = line.strip() # remove trailing \n
if line == '# STOCKHOLM 1.0':
self._header = line
break
@@ -425,7 +425,7 @@ def next(self):
% (len(ids), self.records_per_alignment))
alignment_length = len(seqs.values()[0])
- records = [] #Alignment obj will put them all in a list anyway
+ records = [] # Alignment obj will put them all in a list anyway
for id in ids:
seq = seqs[id]
if alignment_length != len(seq):
@@ -509,12 +509,12 @@ def _populate_meta_data(self, identifier, record):
seq_data = self._get_meta_data(identifier, self.seq_annotation)
for feature in seq_data:
#Note this dictionary contains lists!
- if feature=="AC" : #ACcession number
+ if feature=="AC": # ACcession number
assert len(seq_data[feature])==1
record.annotations["accession"]=seq_data[feature][0]
- elif feature=="DE" : #DEscription
+ elif feature=="DE": # DEscription
record.description = "\n".join(seq_data[feature])
- elif feature=="DR" : #Database Reference
+ elif feature=="DR": # Database Reference
#Should we try and parse the strings?
record.dbxrefs = seq_data[feature]
elif feature in self.pfam_gs_mapping:
8 Bio/AlignIO/__init__.py
@@ -123,7 +123,7 @@
# For using with statement in Python 2.5 or Jython
from __future__ import with_statement
-__docformat__ = "epytext en" #not just plaintext
+__docformat__ = "epytext en" # not just plaintext
#TODO
# - define policy on reading aligned sequences with gaps in
@@ -154,7 +154,7 @@
#Convention for format names is "mainname-subtype" in lower case.
#Please use the same names as BioPerl and EMBOSS where possible.
-_FormatToIterator = {#"fasta" is done via Bio.SeqIO
+_FormatToIterator = { # "fasta" is done via Bio.SeqIO
"clustal" : ClustalIO.ClustalIterator,
"emboss" : EmbossIO.EmbossIterator,
"fasta-m10" : FastaIO.FastaM10Iterator,
@@ -165,8 +165,8 @@
"stockholm" : StockholmIO.StockholmIterator,
}
-_FormatToWriter = {#"fasta" is done via Bio.SeqIO
- #"emboss" : EmbossIO.EmbossWriter, (unfinished)
+_FormatToWriter = { # "fasta" is done via Bio.SeqIO
+ # "emboss" : EmbossIO.EmbossWriter, (unfinished)
"nexus" : NexusIO.NexusWriter,
"phylip" : PhylipIO.PhylipWriter,
"phylip-sequential" : PhylipIO.SequentialPhylipWriter,
6 Bio/Application/__init__.py
@@ -212,7 +212,7 @@ def deleter(name):
"and its associated value. Set this property to the " \
"argument value required." % p.names[0]
prop = property(getter(name), setter(name), deleter(name), doc)
- setattr(self.__class__, name, prop) #magic!
+ setattr(self.__class__, name, prop) # magic!
for key, value in kwargs.iteritems():
self.set_parameter(key, value)
@@ -333,7 +333,7 @@ def _check_value(self, value, name, check_function):
finish silently otherwise.
"""
if check_function is not None:
- is_good = check_function(value) #May raise an exception
+ is_good = check_function(value) # May raise an exception
assert is_good in [0,1,True,False]
if not is_good:
raise ValueError("Invalid parameter value %r for parameter %s"
@@ -364,7 +364,7 @@ def __setattr__(self, name, value):
assumed to be parameters, and passed to the self.set_parameter method
for validation and assignment.
"""
- if name in ['parameters', 'program_name']: # Allowed attributes
+ if name in ['parameters', 'program_name']: # Allowed attributes
self.__dict__[name] = value
else:
self.set_parameter(name, value) # treat as a parameter
2  Bio/Blast/Applications.py
@@ -505,7 +505,7 @@ def __init__(self, cmd=None, **kwargs):
_Option(["-query", "query"],
"The sequence to search with.",
filename=True,
- equate=False), #Should this be required?
+ equate=False), # Should this be required?
_Option(["-query_loc", "query_loc"],
"Location on the query sequence (Format: start-stop)",
equate=False),
2  Bio/Blast/NCBIXML.py
@@ -148,7 +148,7 @@ def reset(self):
self._records = []
self._header = Record.Header()
self._parameters = Record.Parameters()
- self._parameters.filter = None #Maybe I should update the class?
+ self._parameters.filter = None # Maybe I should update the class?
def _start_Iteration(self):
self._blast = Record.Blast()
4 Bio/Blast/Record.py
@@ -226,11 +226,11 @@ def to_generic(self, alphabet):
parse_number = 0
n = 0
for name, start, seq, end in self.alignment:
- if name == 'QUERY': #QUERY is the first in each alignment block
+ if name == 'QUERY': # QUERY is the first in each alignment block
parse_number += 1
n = 0
- if parse_number == 1: # create on first_parse, append on all others
+ if parse_number == 1: # create on first_parse, append on all others
seq_parts.append(seq)
seq_names.append(name)
else:
4 Bio/Data/CodonTable.py
@@ -241,7 +241,7 @@ def list_ambiguous_codons(codons, ambiguous_nucleotide_values):
codon = c1+c2+c3
if codon not in candidates and codon not in codons:
candidates.append(codon)
- answer = codons[:] #copy
+ answer = codons[:] # copy
#print "Have %i new candidates" % len(candidates)
for ambig_codon in candidates:
wanted = True
@@ -874,7 +874,7 @@ def register_ncbi_table(name, alt_name, id,
assert ambiguous_rna_by_id[n].forward_table["GUN"] == "V"
if n != 23 :
#For table 23, UUN = F, L or stop.
- assert ambiguous_rna_by_id[n].forward_table["UUN"] == "X" #F or L
+ assert ambiguous_rna_by_id[n].forward_table["UUN"] == "X" # F or L
#R = A or G, so URR = UAA or UGA / TRA = TAA or TGA = stop codons
if "UAA" in unambiguous_rna_by_id[n].stop_codons \
and "UGA" in unambiguous_rna_by_id[n].stop_codons:
2  Bio/Entrez/__init__.py
@@ -133,7 +133,7 @@ def efetch(db, **keywds):
#a list of ID strings now gives HTTP Error 500: Internal server error
#This was turned into ...&id=22307645&id=22303114&... which used to work
#while now the NCBI appear to insist on ...&id=22301129,22299544,...
- keywords = keywds.copy() #Don't alter input dict!
+ keywords = keywds.copy() # Don't alter input dict!
keywords["id"] = ",".join(keywds["id"])
variables.update(keywords)
return _open(cgi, variables)
8 Bio/FSSP/__init__.py
@@ -231,8 +231,8 @@ def read_fssp(fssp_handle):
if not summary_title.match(curline):
raise ValueError('Bad FSSP file: no summary record found')
- curline = fssp_handle.readline() #Read the title line, discard
- curline = fssp_handle.readline() #Read the next line
+ curline = fssp_handle.readline() # Read the title line, discard
+ curline = fssp_handle.readline() # Read the next line
# Process the summary records into a list
while summary_rec.match(curline):
cur_sum_rec = FSSPSumRec(curline)
@@ -255,8 +255,8 @@ def read_fssp(fssp_handle):
break
# If we got to this point, this means that we have matched an
# alignments title. Parse the alignment records in a loop.
- curline = fssp_handle.readline() #Read the title line, discard
- curline = fssp_handle.readline() #Read the next line
+ curline = fssp_handle.readline() # Read the title line, discard
+ curline = fssp_handle.readline() # Read the next line
while alignments_rec.match(curline):
align_rec = FSSPAlignRec(fff_rec(curline))
key = align_rec.chain_id+align_rec.res_name+str(align_rec.pdb_res_num)
6 Bio/GA/Organism.py
@@ -1,7 +1,7 @@
"""Deal with an Organism in a Genetic Algorithm population.
"""
# standard modules
-import sys #for Python 3 hack
+import sys # for Python 3 hack
import random
import array
@@ -57,9 +57,9 @@ def random_population(genome_alphabet, genome_size, num_organisms,
# figure out what type of characters are in the alphabet
if isinstance(genome_alphabet.letters[0], str):
if sys.version_info[0] == 3:
- alphabet_type = "u" #Use unicode string on Python 3
+ alphabet_type = "u" # Use unicode string on Python 3
else:
- alphabet_type = "c" #Use byte string on Python 2
+ alphabet_type = "c" # Use byte string on Python 2
elif isinstance(genome_alphabet.letters[0], int):
alphabet_type = "i"
elif isinstance(genome_alphabet.letters[0], float):
10 Bio/GenBank/__init__.py
@@ -119,7 +119,7 @@
assert _re_complex_location.match("(3.9)..10")
assert _re_complex_location.match("26..(30.33)")
assert _re_complex_location.match("(13.19)..(20.28)")
-assert _re_complex_location.match("41^42") #between
+assert _re_complex_location.match("41^42") # between
assert _re_complex_location.match("AL121804:41^42")
assert _re_complex_location.match("AL121804:41..610")
assert _re_complex_location.match("AL121804.2:41..610")
@@ -318,7 +318,7 @@ def _split_compound_loc(compound_loc):
assert compound_loc[0:2] != ".."
i = compound_loc.find(",")
part = compound_loc[:i]
- compound_loc = compound_loc[i:] #includes the comma
+ compound_loc = compound_loc[i:] # includes the comma
while part.count("(") > part.count(")"):
assert "one-of(" in part, (part, compound_loc)
i = compound_loc.find(")")
@@ -331,7 +331,7 @@ def _split_compound_loc(compound_loc):
compound_loc = ""
else:
part += compound_loc[:i]
- compound_loc = compound_loc[i:] #includes the comma
+ compound_loc = compound_loc[i:] # includes the comma
while part.count("(") > part.count(")"):
assert part.count("one-of(") == 2
i = compound_loc.find(")")
@@ -377,7 +377,7 @@ def next(self):
while True:
line = self.handle.readline()
if not line:
- return None #Premature end of file?
+ return None # Premature end of file?
lines.append(line)
if line.rstrip() == "//":
break
@@ -1142,7 +1142,7 @@ def record_end(self, content):
if not self.data.id:
assert 'accessions' not in self.data.annotations, \
self.data.annotations['accessions']
- self.data.id = self.data.name #Good fall back?
+ self.data.id = self.data.name # Good fall back?
elif self.data.id.count('.') == 0:
try:
self.data.id+='.%i' % self.data.annotations['sequence_version']
6 Bio/Graphics/BasicChromosome.py
@@ -407,9 +407,9 @@ def draw(self, cur_drawing):
self.start_y_position, self.end_y_position):
assert position != -1, "Need to set drawing coordinates."
- self._draw_subcomponents(cur_drawing) #Anything behind
+ self._draw_subcomponents(cur_drawing) # Anything behind
self._draw_segment(cur_drawing)
- self._overdraw_subcomponents(cur_drawing) #Anything on top
+ self._overdraw_subcomponents(cur_drawing) # Anything on top
self._draw_label(cur_drawing)
def _draw_subcomponents(self, cur_drawing):
@@ -501,7 +501,7 @@ def _spring_layout(desired, minimum, maximum, gap=0):
"""
count = len(desired)
if count <= 1:
- return desired #Easy!
+ return desired # Easy!
if minimum >= maximum:
raise ValueError("Bad min/max %f and %f" % (minimum, maximum))
if min(desired) < minimum or max(desired) > maximum:
8 Bio/Graphics/GenomeDiagram/_AbstractDrawer.py
@@ -164,7 +164,7 @@ def draw_cut_corner_box(point1, point2, corner=0.5,
x1+corner, y1],
strokeColor=strokecolor,
strokeWidth=1,
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
fillColor=color,
**kwargs)
@@ -246,7 +246,7 @@ def draw_arrow(point1, point2, color=colors.lightgreen, border=None,
shaftheight = boxheight*shaft_height_ratio
headlength = min(abs(boxheight)*head_length_ratio, abs(boxwidth))
if boxwidth < 0:
- headlength *= -1 #reverse it
+ headlength *= -1 # reverse it
shafttop = 0.5*(boxheight+shaftheight)
shaftbase = boxheight-shafttop
@@ -263,7 +263,7 @@ def draw_arrow(point1, point2, color=colors.lightgreen, border=None,
#strokeWidth=max(1, int(boxheight/40.)),
strokeWidth=1,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
fillColor=color,
**kwargs)
@@ -426,7 +426,7 @@ def __init__(self, parent, pagesize='A3', orientation='landscape',
# Perform 'administrative' tasks of setting up the page
self.set_page_size(pagesize, orientation) # Set drawing size
self.set_margins(x, y, xl, xr, yt, yb) # Set page margins
- self.set_bounds(start, end) # Set limits on what will be drawn
+ self.set_bounds(start, end) # Set limits on what will be drawn
self.tracklines = tracklines # Set flags
if cross_track_links is None:
cross_track_links = []
24 Bio/Graphics/GenomeDiagram/_CircularDrawer.py
@@ -1122,7 +1122,7 @@ def _draw_arc_line(self, path, start_radius, end_radius, start_angle, end_angle,
x0, y0 = self.xcenter, self.ycenter # origin of the circle
radius_diff = end_radius - start_radius
angle_diff = end_angle - start_angle
- dx = 0.01 #heuristic
+ dx = 0.01 # heuristic
a = start_angle*pi/180
if move:
path.moveTo(x0+start_radius*cos(a), y0+start_radius*sin(a))
@@ -1132,7 +1132,7 @@ def _draw_arc_line(self, path, start_radius, end_radius, start_angle, end_angle,
if 0.01 <= abs(dx):
while x < 1:
r = start_radius + x*radius_diff
- a = (start_angle + x*(angle_diff))*pi/180 #to radians for sin/cos
+ a = (start_angle + x * (angle_diff)) * pi / 180 # to radians for sin/cos
#print x0+r*cos(a), y0+r*sin(a)
path.lineTo(x0+r*cos(a), y0+r*sin(a))
x += dx
@@ -1156,7 +1156,7 @@ def _draw_arc_poly(self, inner_radius, outer_radius,
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
@@ -1194,7 +1194,7 @@ def _draw_arc_poly(self, inner_radius, outer_radius,
x4,y4 = (x0+outer_radius*outer_startsin, y0+outer_radius*outer_startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
)
def _draw_sigil_cut_corner_box(self, bottom, center, top,
@@ -1226,7 +1226,7 @@ def _draw_sigil_cut_corner_box(self, bottom, center, top,
cornerangle_delta = max(0.0,min(abs(boxheight)*0.5*corner/middle_radius, abs(angle*0.5)))
if angle < 0:
- cornerangle_delta *= -1 #reverse it
+ cornerangle_delta *= -1 # reverse it
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
@@ -1234,7 +1234,7 @@ def _draw_sigil_cut_corner_box(self, bottom, center, top,
x0, y0 = self.xcenter, self.ycenter # origin of the circle
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0,
**kwargs)
#Inner curved edge
@@ -1272,7 +1272,7 @@ def _draw_sigil_arrow(self, bottom, center, top,
else:
inner_radius = bottom
outer_radius = top
- orientation = "right" #backwards compatibility
+ orientation = "right" # backwards compatibility
return self._draw_arc_arrow(inner_radius, outer_radius, startangle, endangle,
orientation=orientation, **kwargs)
@@ -1316,7 +1316,7 @@ def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
- headangle_delta *= -1 #reverse it
+ headangle_delta *= -1 # reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
@@ -1350,13 +1350,13 @@ def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
- strokeLineJoin=1, #1=round, not mitre!
+ strokeLineJoin=1, # 1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
@@ -1386,7 +1386,7 @@ def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
@@ -1470,7 +1470,7 @@ def _draw_sigil_jaggy(self, bottom, center, top,
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
4 Bio/Graphics/GenomeDiagram/_Diagram.py
@@ -416,11 +416,11 @@ def renumber_tracks(self, low=1, step=1):
value (low)
"""
track = low # Start numbering from here
- levels = self.get_levels() #
+ levels = self.get_levels()
conversion = {} # Holds new set of levels
for level in levels: # Starting at low...
- conversion[track] = self.tracks[level] # Add old tracks to new set
+ conversion[track] = self.tracks[level] # Add old tracks to new set
conversion[track].track_level = track
track += step # step interval
self.tracks = conversion # Replace old set of levels with new set
16 Bio/Graphics/GenomeDiagram/_LinearDrawer.py
@@ -907,7 +907,7 @@ def draw_cross_link(self, cross_link):
strokeColor=strokecolor,
fillColor=fillcolor,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0))
elif fragment < start_fragmentA or end_fragmentA < fragment:
if cross_link.flip:
@@ -927,7 +927,7 @@ def draw_cross_link(self, cross_link):
strokeColor=strokecolor,
fillColor=fillcolor,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0))
elif cross_link.flip and ((crop_leftA and not crop_rightA) or
(crop_leftB and not crop_rightB)):
@@ -938,7 +938,7 @@ def draw_cross_link(self, cross_link):
strokeColor=strokecolor,
fillColor=fillcolor,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0))
elif cross_link.flip and ((crop_rightA and not crop_leftA) or
(crop_rightB and not crop_leftB)):
@@ -949,21 +949,21 @@ def draw_cross_link(self, cross_link):
strokeColor=strokecolor,
fillColor=fillcolor,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0))
elif cross_link.flip:
answer.append(Polygon([xAs, yA, xAe, yA, xBs, yB, xBe, yB],
strokeColor=strokecolor,
fillColor=fillcolor,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0))
else:
answer.append(Polygon([xAs, yA, xAe, yA, xBe, yB, xBs, yB],
strokeColor=strokecolor,
fillColor=fillcolor,
#default is mitre/miter which can stick out too much:
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
strokewidth=0))
return answer
@@ -1396,7 +1396,7 @@ def _draw_sigil_jaggy(self, bottom, center, top, x1, x2, strand,
return Polygon(points,
strokeColor=strokecolor,
strokeWidth=1,
- strokeLineJoin=1, #1=round
+ strokeLineJoin=1, # 1=round
fillColor=color,
**kwargs)
@@ -1413,7 +1413,7 @@ def _draw_sigil_arrow(self, bottom, center, top, x1, x2, strand, **kwargs):
else:
y1 = bottom
y2 = top
- orientation = "right" #backward compatibility
+ orientation = "right" # backward compatibility
return draw_arrow((x1,y1), (x2,y2), orientation=orientation, **kwargs)
def _draw_sigil_big_arrow(self, bottom, center, top, x1, x2, strand, **kwargs):
40 Bio/Motif/_Motif.py
@@ -220,7 +220,7 @@ def pwm(self,laplace=True):
#dict[seq[i]]=dict[seq[i]]+1
try:
dict[seq[i]]+=1
- except KeyError: #we need to ignore non-alphabet letters
+ except KeyError: # we need to ignore non-alphabet letters
pass
self._pwm.append(FreqTable.FreqTable(dict,FreqTable.COUNT,self.alphabet))
self._pwm_is_current=1
@@ -356,7 +356,7 @@ def dist_pearson(self, motif, masked = 0):
for offset in range(-self.length+1,motif.length):
if offset<0:
p = self.dist_pearson_at(motif,-offset)
- else: #offset>=0
+ else: # offset>=0
p = motif.dist_pearson_at(self,offset)
if max_p<p:
@@ -365,11 +365,11 @@ def dist_pearson(self, motif, masked = 0):
return 1-max_p,max_o
def dist_pearson_at(self,motif,offset):
- sxx = 0 # \sum x^2
- sxy = 0 # \sum x \cdot y
- sx = 0 # \sum x
- sy = 0 # \sum y
- syy = 0 # \sum x^2
+ sxx = 0 # \sum x^2
+ sxy = 0 # \sum x \cdot y
+ sx = 0 # \sum x
+ sy = 0 # \sum y
+ syy = 0 # \sum x^2
norm=max(self.length,offset+motif.length)
for pos in range(max(self.length,offset+motif.length)):
@@ -395,7 +395,7 @@ def dist_product(self,other):
for offset in range(-self.length+1,other.length):
if offset<0:
p = self.dist_product_at(other,-offset)
- else: #offset>=0
+ else: # offset>=0
p = other.dist_product_at(self,offset)
if max_p<p:
max_p=p
@@ -437,7 +437,7 @@ def dist_dpq(self,other):
if offset<0:
d = self.dist_dpq_at(other,-offset)
overlap = self.length+offset
- else: #offset>=0
+ else: # offset>=0
d = other.dist_dpq_at(self,offset)
overlap = other.length-offset
overlap = min(self.length,other.length,overlap)
@@ -450,7 +450,7 @@ def dist_dpq(self,other):
if min_d> d:
min_d=d
min_o=-offset
- return min_d,min_o#,d_s
+ return min_d,min_o # ,d_s
def dist_dpq_at(self,other,offset):
"""
@@ -543,8 +543,8 @@ def _to_fasta(self):
col[i]+=(alpha*s)[:(s-len(col[i]))]
#iterate over instances
for i in range(s):
- instance="" #start with empty seq
- for j in range(self.length): #iterate over positions
+ instance = "" # start with empty seq
+ for j in range(self.length): # iterate over positions
instance+=col[j][i]
instance = Seq(instance, self.alphabet)
instances.append(instance)
@@ -566,7 +566,7 @@ def reverse_complement(self):
instance = instance.reverse_complement()
instances.append(instance)
res = Motif(alphabet, instances)
- else: # has counts
+ else: # has counts
res = Motif(alphabet)
res.counts={}
res.counts["A"]=self.counts["T"][:]
@@ -626,8 +626,8 @@ def _from_horiz_matrix(self,stream,letters=None,make_instances=False):
#print ln
try:
self.counts[i]=map(int,ln)
- except ValueError: #not integers
- self.counts[i]=map(float,ln) #map(lambda s: int(100*float(s)),ln)
+ except ValueError: # not integers
+ self.counts[i]=map(float,ln) # map(lambda s: int(100*float(s)),ln)
#print counts[i]
s = sum(self.counts[nuc][0] for nuc in letters)
@@ -660,8 +660,8 @@ def make_instances_from_counts(self):
#print i,col[i]
#iterate over instances
for i in range(s):
- inst="" #start with empty seq
- for j in range(self.length): #iterate over positions
+ inst="" # start with empty seq
+ for j in range(self.length): # iterate over positions
inst+=col[j][i]
#print i,inst
inst=Seq(inst,self.alphabet)
@@ -1013,7 +1013,7 @@ def weblogo(self,fname,format="PNG",version="2.8.2", **kwds):
def _to_transfac(self):
"""Write the representation of a motif in TRANSFAC format
"""
- res="XX\nTY Motif\n" #header
+ res="XX\nTY Motif\n" # header
try:
res+="ID %s\n"%self.name
except:
@@ -1054,13 +1054,13 @@ def _to_horizontal_matrix(self,letters=None,normalized=True):
if letters is None:
letters=self.alphabet.letters
res=""
- if normalized: #output PWM
+ if normalized: # output PWM
self._pwm_is_current=False
mat=self.pwm(laplace=False)
for a in letters:
res+="\t".join([str(mat[i][a]) for i in range(self.length)])
res+="\n"
- else: #output counts
+ else: # output counts
if self.counts is None:
self.make_counts_from_instances()
mat=self.counts
6 Bio/Motif/__init__.py
@@ -73,13 +73,13 @@ def parse(handle,format):
parser=_parsers[format]
except KeyError:
- try: #not a true parser, try reader formats
+ try: # not a true parser, try reader formats
reader=_readers[format]
except:
raise ValueError("Wrong parser format")
- else: #we have a proper reader
+ else: # we have a proper reader
yield reader(handle)
- else: # we have a proper reader
+ else: # we have a proper reader
for m in parser(handle).motifs:
yield m
6 Bio/Nexus/Nexus.py
@@ -499,7 +499,7 @@ def __init__(self, line, title):
try:
#Assume matrix (all other command lines have been stripped of \n)
self.command, options = line.strip().split('\n', 1)
- except ValueError: #Not matrix
+ except ValueError: # Not matrix
#self.command,options=line.split(' ',1) #no: could be tab or spaces (translate...)
self.command=line.split()[0]
options=' '.join(line.split()[1:])
@@ -703,8 +703,8 @@ def _format(self,options):
self.unambiguous_letters=copy.deepcopy(IUPACData.unambiguous_rna_letters)
elif self.datatype=='protein':
self.alphabet=copy.deepcopy(IUPAC.protein)
- self.ambiguous_values={'B':'DN','Z':'EQ','X':copy.deepcopy(IUPACData.protein_letters)} # that's how PAUP handles it
- self.unambiguous_letters=copy.deepcopy(IUPACData.protein_letters)+'*' # stop-codon
+ self.ambiguous_values={'B':'DN','Z':'EQ','X':copy.deepcopy(IUPACData.protein_letters)} # that's how PAUP handles it
+ self.unambiguous_letters=copy.deepcopy(IUPACData.protein_letters)+'*' # stop-codon
elif self.datatype=='standard':
raise NexusError('Datatype standard is not yet supported.')
#self.alphabet=None
4 Bio/Nexus/Trees.py
@@ -448,7 +448,7 @@ def is_bifurcating(self,node=None):
"""Return True if tree downstream of node is strictly bifurcating."""
if node is None:
node=self.root
- if node==self.root and len(self.node(node).succ)==3: #root can be trifurcating, because it has no ancestor
+ if node==self.root and len(self.node(node).succ)==3: # root can be trifurcating, because it has no ancestor
return self.is_bifurcating(self.node(node).succ[0]) and \
self.is_bifurcating(self.node(node).succ[1]) and \
self.is_bifurcating(self.node(node).succ[2])
@@ -613,7 +613,7 @@ def ladderize_nodes(nodes,ladderize=None):
def newickize(node,ladderize=None):
"""Convert a node tree to a newick tree recursively."""
- if not self.node(node).succ: #terminal
+ if not self.node(node).succ: # terminal
return self.node(node).data.taxon+make_info_string(self.node(node).data,terminal=True)
else:
succnodes=ladderize_nodes(self.node(node).succ,ladderize=ladderize)
2  Bio/Phylo/NewickIO.py
@@ -183,7 +183,7 @@ def to_strings(self, confidence_as_branch_length=False,
def newickize(clade):
"""Convert a node tree to a Newick tree string, recursively."""
- if clade.is_terminal(): #terminal
+ if clade.is_terminal(): # terminal
return ((clade.name or '')
+ make_info_string(clade, terminal=True))
else:
2  Bio/Phylo/PAML/baseml.py
@@ -68,7 +68,7 @@ def write_ctl_file(self):
"""
# Make sure all paths are relative to the working directory
self._set_rel_paths()
- if True: #Dummy statement to preserve indentation for diff
+ if True: # Dummy statement to preserve indentation for diff
ctl_handle = open(self.ctl_file, 'w')
ctl_handle.write("seqfile = %s\n" % self._rel_alignment)
ctl_handle.write("outfile = %s\n" % self._rel_out_file)
2  Bio/Phylo/PAML/codeml.py
@@ -74,7 +74,7 @@ def write_ctl_file(self):
"""
# Make sure all paths are relative to the working directory
self._set_rel_paths()
- if True: #Dummy statement to preserve indentation for diff
+ if True: # Dummy statement to preserve indentation for diff
ctl_handle = open(self.ctl_file, 'w')
ctl_handle.write("seqfile = %s\n" % self._rel_alignment)
ctl_handle.write("outfile = %s\n" % self._rel_out_file)
2  Bio/Phylo/PAML/yn00.py
@@ -43,7 +43,7 @@ def write_ctl_file(self):
"""
# Make sure all paths are relative to the working directory
self._set_rel_paths()
- if True: #Dummy statement to preserve indentation for diff
+ if True: # Dummy statement to preserve indentation for diff
ctl_handle = open(self.ctl_file, 'w')
ctl_handle.write("seqfile = %s\n" % self._rel_alignment)
ctl_handle.write("outfile = %s\n" % self._rel_out_file)
2  Bio/PopGen/FDist/Async.py
@@ -165,7 +165,7 @@ def run_fdist(self, npops, nsamples, fst, sample_size,
try:
os.mkdir(full_path)
except OSError:
- pass #Its ok, if it is already there
+ pass # Its ok, if it is already there
if "ss_file" in os.listdir(data_dir):
shutil.copy(data_dir + os.sep + "ss_file", full_path)
id = self.async.run_program('fdist', {
8 Bio/PopGen/FDist/Utils.py
@@ -49,7 +49,7 @@ def _convert_genepop_to_fdist(gp_rec):
for al in indiv[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
- alleles.sort() #Dominance requires this
+ alleles.sort() # Dominance requires this
#here we go again (necessary...)
for pop_i in range(len(gp_rec.populations)):
@@ -59,7 +59,7 @@ def _convert_genepop_to_fdist(gp_rec):
if al is not None:
count = allele_counts.get(al, 0)
allele_counts[al] = count + 1
- allele_array = [] #We need the same order as in alleles
+ allele_array = [] # We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
@@ -114,7 +114,7 @@ def init_pop():
report_pops(num_pops)
curr_pop = init_pop()
lParser = work_rec.get_individual()
- work_rec._handle.close() #TODO - Needs a proper fix
+ work_rec._handle.close() # TODO - Needs a proper fix
pops.append(curr_pop)
fd_rec.num_pops = num_pops
for loci_pos in range(num_loci):
@@ -173,7 +173,7 @@ def countPops(rec):
alleles.sort()
def process_pop(pop_data, alleles, allele_counts):
- allele_array = [] #We need the same order as in alleles
+ allele_array = [] # We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
2  Bio/PopGen/FDist/__init__.py
@@ -68,7 +68,7 @@ def __init__(self):
self.loci_data = []
def __str__(self):
- rep = ['0\n'] #We only export in 0 format, even if originally was 1
+ rep = ['0\n'] # We only export in 0 format, even if originally was 1
rep.append(str(self.num_pops) + '\n')
rep.append(str(self.num_loci) + '\n')
rep.append('\n')
2  Bio/PopGen/GenePop/Controller.py
@@ -263,7 +263,7 @@ def _run_genepop(self, extensions, option, fname, opts={}):
self.controller.set_input(fname)
for opt in opts:
self.controller.set_parameter(opt, opt+"="+str(opts[opt]))
- self.controller() #checks error level is zero
+ self.controller() # checks error level is zero
self._remove_garbage(None)
return
4 Bio/PopGen/GenePop/EasyController.py
@@ -24,8 +24,8 @@ def __init__(self, fname, genepop_dir = None):
"""
self._fname = fname
self._controller = GenePopController(genepop_dir)
- self.__fst_pair_locus = {} #More caches like this needed!
- self.__allele_frequency = {} #More caches like this needed!
+ self.__fst_pair_locus = {} # More caches like this needed!
+ self.__allele_frequency = {} # More caches like this needed!
def get_basic_info(self):
f=open(self._fname)
4 Bio/PopGen/GenePop/LargeFileParser.py
@@ -22,7 +22,7 @@ def get_indiv(line):
indiv_name, marker_line = line.split(',')
markers = marker_line.replace('\t', ' ').split(' ')
markers = [marker for marker in markers if marker!='']
- if len(markers[0]) in [2, 4]: #2 digits per allele
+ if len(markers[0]) in [2, 4]: # 2 digits per allele
marker_len = 2
else:
marker_len = 3
@@ -30,7 +30,7 @@ def get_indiv(line):
allele_list = [(int(marker[0:marker_len]),
int(marker[marker_len:]))
for marker in markers]
- except ValueError: #Haploid
+ except ValueError: # Haploid
allele_list = [(int(marker[0:marker_len]),)
for marker in markers]
return indiv_name, allele_list, marker_len
4 Bio/PopGen/GenePop/__init__.py
@@ -31,7 +31,7 @@ def int_no_zero(val):
indiv_name, marker_line = line.split(',')
markers = marker_line.replace('\t', ' ').split(' ')
markers = [marker for marker in markers if marker!='']
- if len(markers[0]) in [2, 4]: #2 digits per allele
+ if len(markers[0]) in [2, 4]: # 2 digits per allele
marker_len = 2
else:
marker_len = 3
@@ -39,7 +39,7 @@ def int_no_zero(val):
allele_list = [(int_no_zero(marker[0:marker_len]),
int_no_zero(marker[marker_len:]))
for marker in markers]
- except ValueError: #Haploid
+ except ValueError: # Haploid
allele_list = [(int_no_zero(marker[0:marker_len]),)
for marker in markers]
return indiv_name, allele_list, marker_len
2  Bio/PopGen/SimCoal/Cache.py
@@ -46,7 +46,7 @@ def run_simcoal(self, par_file, num_sims, ploydi = '1', parDir = None):
try:
tf.close()
except NameError:
- pass #not opened in the first place, OK.
+ pass # not opened in the first place, OK.
scc = SimCoalController(self.simcoalDir)
scc.run_simcoal(par_file, num_sims, ploydi, parDir)
tf = tarfile.open(tar_name, 'w:bz2')
2  Bio/PopGen/SimCoal/Controller.py
@@ -21,7 +21,7 @@ def __init__(self, simcoal_dir):
The initializer checks for existance and executability of binaries.
"""
self.simcoal_dir = simcoal_dir
- self.os_name = os.name #remove this?
+ self.os_name = os.name # remove this?
dir_contents = os.listdir(self.simcoal_dir)
#We expect the tool to be installed as simcoal2(.exe)
#without any trailing version number.
24 Bio/Restriction/_Update/RestrictionCompiler.py
@@ -702,16 +702,16 @@ class NotFoundError(Exception):
def parseline(self, line):
line = [line[0]]+[line[1].upper()]+[int(i) for i in line[2:9]]+line[9:]
name = line[0].replace("-","_")
- site = line[1] # sequence of the recognition site
+ site = line[1] # sequence of the recognition site
dna = DNA(site)
- size = line[2] # size of the recognition site
+ size = line[2] # size of the recognition site
#
# Calculate the overhang.
#
- fst5 = line[5] # first site sense strand
- fst3 = line[6] # first site antisense strand
- scd5 = line[7] # second site sense strand
- scd3 = line[8] # second site antisense strand
+ fst5 = line[5] # first site sense strand
+ fst3 = line[6] # first site antisense strand
+ scd5 = line[7] # second site sense strand
+ scd3 = line[8] # second site antisense strand
#
# the overhang is the difference between the two cut
@@ -919,7 +919,7 @@ def get(self, block):
#
bl3 = block[3].strip()
if not bl3:
- bl3 = False # site is not methylable
+ bl3 = False # site is not methylable
return (block[0].strip(), bl3, block[5].strip())
def information_mixer(self, file1, file2, file3):
@@ -939,8 +939,8 @@ def information_mixer(self, file1, file2, file3):
line = (sitefile[i2].strip()).split()
name = line[0]
if name == bl[0]:
- line.append(bl[1]) # -> methylation
- line.append(bl[2]) # -> suppliers
+ line.append(bl[1]) # -> methylation
+ line.append(bl[2]) # -> suppliers
else:
bl = self.get(oldblock)
if line[0] == bl[0]:
@@ -953,8 +953,8 @@ def information_mixer(self, file1, file2, file3):
i2 += 1
try:
line = self.parseline(line)
- except OverhangError : # overhang error
- n = name # do not include the enzyme
+ except OverhangError: # overhang error
+ n = name # do not include the enzyme
if not bl[2]:
print 'Anyway, %s is not commercially available.\n' %n
else:
@@ -986,7 +986,7 @@ def information_mixer(self, file1, file2, file3):
# the data to produce the enzyme class are then stored in
# enzymedict.
#
- enzymedict[name] = line[1:] #element zero was the name
+ enzymedict[name] = line[1:] # element zero was the name
except IndexError:
pass
for i in supplier:
2  Bio/SCOP/Raf.py
@@ -258,7 +258,7 @@ def getAtoms(self, pdb_handle, out_handle):
#The set of residues that I have to find records for.
resSet = {}
for r in self.res:
- if r.atom=='X' : #Unknown residue type
+ if r.atom=='X': # Unknown residue type
continue
chainid = r.chainid
if chainid == '_':
4 Bio/SCOP/__init__.py
@@ -545,7 +545,7 @@ def toHieRecord(self):
"""Return an Hie.Record"""
rec = Hie.Record()
rec.sunid = str(self.sunid)
- if self.getParent() : #Not root node
+ if self.getParent(): # Not root node
rec.parent = str(self.getParent().sunid)
else:
rec.parent = '-'
@@ -662,7 +662,7 @@ def toClaRecord(self):
rec.sunid = self.sunid
n = self
- while n.sunid != 0: #Not root node
+ while n.sunid != 0: # Not root node
rec.hierarchy[n.type] = str(n.sunid)
n = n.getParent()
12 Bio/Seq.py
@@ -11,9 +11,9 @@
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.html}
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.pdf}
"""
-__docformat__ ="epytext en" #Don't just use plain text in epydoc API pages!
+__docformat__ ="epytext en" # Don't just use plain text in epydoc API pages!
-import string #for maketrans only
+import string # for maketrans only
import array
import sys
@@ -164,7 +164,7 @@ def __hash__(self):
See the __cmp__ documentation - we plan to change this!
"""
- return id(self) #Currently use object identity for equality testing
+ return id(self) # Currently use object identity for equality testing
def __cmp__(self, other):
"""Compare the sequence to another sequence or a string (README).
@@ -278,7 +278,7 @@ def __add__(self, other):
elif isinstance(other, basestring):
#other is a plain string - use the current alphabet
return self.__class__(str(self) + other, self.alphabet)
- from Bio.SeqRecord import SeqRecord #Lazy to avoid circular imports
+ from Bio.SeqRecord import SeqRecord # Lazy to avoid circular imports
if isinstance(other, SeqRecord):
#Get the SeqRecord's __radd__ to handle this
return NotImplemented
@@ -1081,7 +1081,7 @@ def ungap(self, gap=None):
elif not gap:
raise ValueError("Gap character not given and not defined in alphabet")
else:
- alpha = self.alphabet #modify!
+ alpha = self.alphabet # modify!
if len(gap)!=1 or not isinstance(gap, str):
raise ValueError("Unexpected gap character, %s" % repr(gap))
return Seq(str(self).replace(gap, ""), alpha)
@@ -1532,7 +1532,7 @@ def __init__(self, data, alphabet = Alphabet.generic_alphabet):
self.array_indicator = "u"
else:
self.array_indicator = "c"
- if isinstance(data, str): #TODO - What about unicode?
+ if isinstance(data, str): # TODO - What about unicode?
self.data = array.array(self.array_indicator, data)
else:
self.data = data # assumes the input is an array
21 Bio/SeqIO/UniprotIO.py
@@ -110,7 +110,7 @@ def _parse_protein(element):
"""Parse protein names (PRIVATE)."""
descr_set = False
for protein_element in element.getchildren():
- if protein_element.tag in [NS + 'recommendedName', NS + 'alternativeName']:#recommendedName tag are parsed before
+ if protein_element.tag in [NS + 'recommendedName', NS + 'alternativeName']: # recommendedName tag are parsed before
#use protein fields for name and description
for rec_name in protein_element.getchildren():
ann_key = '%s_%s' % (protein_element.tag.replace(NS, ''),
@@ -120,9 +120,9 @@ def _parse_protein(element):
self.ParsedSeqRecord.description = rec_name.text
descr_set = True
elif protein_element.tag == NS + 'component':
- pass #not parsed
+ pass # not parsed
elif protein_element.tag == NS + 'domain':
- pass #not parsed
+ pass # not parsed
def _parse_gene(element):
for genename_element in element.getchildren():
@@ -232,7 +232,7 @@ def _parse_comment(element):
"pharmaceutical",
"polymorphism",
"PTM",
- "RNA editing",#positions not parsed
+ "RNA editing", # positions not parsed
"similarity",
"subunit",
"tissue specificity",
@@ -273,16 +273,16 @@ def _parse_comment(element):
else:
start = int(loc_element.getiterator(NS +'begin')[0].attrib['position']) - 1
end = int(loc_element.getiterator(NS +'end')[0].attrib['position'])
- except :#undefined positions or erroneusly mapped
+ except: # undefined positions or erroneusly mapped
pass
mass = element.attrib['mass']
- method = element.attrib['mass'] #TODO - Check this, looks wrong!
+ method = element.attrib['mass'] # TODO - Check this, looks wrong!
if start == end == 0:
append_to_annotations(ann_key, 'undefined:%s|%s' % (mass, method))
else:
append_to_annotations(ann_key, '%s..%s:%s|%s' % (start, end, mass, method))
elif element.attrib['type'] == 'sequence caution':
- pass#not parsed: few information, complex structure
+ pass # not parsed: few information, complex structure
elif element.attrib['type'] == 'online information':
for link_element in element.getiterator(NS + 'link'):
ann_key = 'comment_%s' % element.attrib['type'].replace(' ', '')
@@ -382,7 +382,10 @@ def _parse_reference(element):
else:
tissues_str = ''
- reference.location = [] #locations cannot be parsed since they are actually written in free text inside scopes so all the references are put in the annotation.
+ # locations cannot be parsed since they are actually written in
+ # free text inside scopes so all the references are put in the
+ # annotation.
+ reference.location = []
reference.authors = ', '.join(authors)
if journal_name:
if pub_date and j_volume and j_first and j_last:
@@ -437,7 +440,7 @@ def _parse_feature(element):
try:
feature.qualifiers[feature_element.tag.replace(NS, '')] = feature_element.text
except:
- pass#skip unparsable tag
+ pass # skip unparsable tag
self.ParsedSeqRecord.features.append(feature)
def _parse_proteinExistence(element):
4 Bio/SeqRecord.py
@@ -6,7 +6,7 @@
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Represent a Sequence Record, a sequence with annotation."""
-__docformat__ = "epytext en" #Simple markup to show doctests nicely
+__docformat__ = "epytext en" # Simple markup to show doctests nicely
# NEEDS TO BE SYNCH WITH THE REST OF BIOPYTHON AND BIOPERL
# In particular, the SeqRecord and BioSQL.BioSeq.DBSeqRecord classes
@@ -1075,7 +1075,7 @@ def reverse_complement(self, id=False, name=False, description=False,
>>> print rc.id, rc.seq
Test ACGA
"""
- from Bio.Seq import MutableSeq #Lazy to avoid circular imports
+ from Bio.Seq import MutableSeq # Lazy to avoid circular imports
if isinstance(self.seq, MutableSeq):
#Currently the MutableSeq reverse complement is in situ
answer = SeqRecord(self.seq.toseq().reverse_complement())
8 Bio/SeqUtils/MeltingTemp.py
@@ -55,8 +55,8 @@ def Tm_staluc(s,dnac=50,saltc=50,rna=0):
#+Put thermodinamics table in a external file for users to change at will
#+Add support for danglings ends (see Le Novele. 2001) and mismatches.
- dh = 0 #DeltaH. Enthalpy
- ds = 0 #deltaS Entropy
+ dh = 0 # DeltaH. Enthalpy
+ ds = 0 # deltaS Entropy
def tercorr(stri):
deltah = 0
@@ -115,8 +115,8 @@ def overcount(st,p):
x = i + 1
return ocu
- R = 1.987 # universal gas constant in Cal/degrees C*Mol
- sup = str(s).upper() #turn any Seq object into a string (need index method)
+ R = 1.987 # universal gas constant in Cal/degrees C*Mol
+ sup = str(s).upper() # turn any Seq object into a string (need index method)
vsTC, vh = tercorr(sup)
vs = vsTC
4 Bio/SeqUtils/ProtParam.py
@@ -18,8 +18,8 @@
"""
import sys
-import ProtParamData #Local
-import IsoelectricPoint #Local
+import ProtParamData # Local
+import IsoelectricPoint # Local
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
2  Bio/SwissProt/__init__.py
@@ -240,7 +240,7 @@ def _read(handle):
reference.authors = " ".join(reference.authors).rstrip(";")
reference.title = " ".join(reference.title).rstrip(";")
if reference.title.startswith('"') and reference.title.endswith('"'):
- reference.title = reference.title[1:-1] #remove quotes
+ reference.title = reference.title[1:-1] # remove quotes
reference.location = " ".join(reference.location)
record.sequence = "".join(_sequence_lines)
return record
6 Bio/TogoWS/__init__.py
@@ -185,8 +185,8 @@ def search_iter(db, query, limit=None, batch=100):
remain = count
if limit is not None:
remain = min(remain, limit)
- offset = 1 #They don't use zero based counting
- prev_ids = [] #Just cache the last batch for error checking
+ offset = 1 # They don't use zero based counting
+ prev_ids = [] # Just cache the last batch for error checking
while remain:
batch = min(batch, remain)
#print "%r left, asking for %r" % (remain, batch)
@@ -305,7 +305,7 @@ def _open(url, post=None):
In the absense of clear guidelines, this function enforces a limit of
"up to three queries per second" to avoid abusing the TogoWS servers.
"""
- delay = 0.333333333 #one third of a second
+ delay = 0.333333333 # one third of a second
current = time.time()
wait = _open.previous + delay - current
if wait > 0:
2  Bio/__init__.py
@@ -10,7 +10,7 @@
http://biopython.org
"""
-__docformat__ = "epytext en" #not just plaintext
+__docformat__ = "epytext en" # not just plaintext
__version__ = "1.60+"
22 Bio/bgzf.py
@@ -223,7 +223,7 @@
import zlib
import struct
-import __builtin__ #to access the usual open function
+import __builtin__ # to access the usual open function
from Bio._py3k import _as_bytes, _as_string
@@ -405,18 +405,18 @@ def _load_bgzf_block(handle, text_mode=False):
x_len = 0
while x_len < extra_len:
subfield_id = handle.read(2)
- subfield_len = struct.unpack("<H", handle.read(2))[0] #uint16_t
+ subfield_len = struct.unpack("<H", handle.read(2))[0] # uint16_t
subfield_data = handle.read(subfield_len)
x_len += subfield_len + 4
if subfield_id == _bytes_BC:
assert subfield_len == 2, "Wrong BC payload length"
assert block_size is None, "Two BC subfields?"
- block_size = struct.unpack("<H", subfield_data)[0]+1 #uint16_t
+ block_size = struct.unpack("<H", subfield_data)[0] + 1 # uint16_t
assert x_len == extra_len, (x_len, extra_len)
assert block_size is not None, "Missing BC, this isn't a BGZF file!"
#Now comes the compressed data, CRC, and length of uncompressed data.
deflate_size = block_size - 1 - extra_len - 19
- d = zlib.decompressobj(-15) #Negative window size means no headers
+ d = zlib.decompressobj(-15) # Negative window size means no headers
data = d.decompress(handle.read(deflate_size)) + d.flush()
expected_crc = handle.read(4)
expected_size = struct.unpack("<I", handle.read(4))[0]
@@ -618,16 +618,16 @@ def read(self, size=-1):
#(lazy loading, don't load the next block unless we have too)
data = self._buffer[self._within_block_offset:self._within_block_offset + size]
self._within_block_offset += size
- assert data #Must be at least 1 byte
+ assert data # Must be at least 1 byte
return data
else:
data = self._buffer[self._within_block_offset:]
size -= len(data)
- self._load_block() #will reset offsets
+ self._load_block() # will reset offsets
#TODO - Test with corner case of an empty block followed by
#a non-empty block
if not self._buffer:
- return data #EOF
+ return data # EOF
elif size:
#TODO - Avoid recursion
return data + self.read(size)
@@ -641,9 +641,9 @@ def readline(self):
if i==-1:
#No newline, need to read in more data
data = self._buffer[self._within_block_offset:]
- self._load_block() #will reset offsets
+ self._load_block() # will reset offsets
if not self._buffer:
- return data #EOF
+ return data # EOF
else:
#TODO - Avoid recursion
return data + self.readline()
@@ -651,7 +651,7 @@ def readline(self):
#Found new line, but right at end of block (SPECIAL)
data = self._buffer[self._within_block_offset:]
#Must now load the next block to ensure tell() works
- self._load_block() #will reset offsets
+ self._load_block() # will reset offsets
assert data
return data
else:
@@ -724,7 +724,7 @@ def _write_block(self, block):
crc = struct.pack("<i", crc)
else:
crc = struct.pack("<I", crc)
- bsize = struct.pack("<H", len(compressed)+25) #includes -1
+ bsize = struct.pack("<H", len(compressed)+25) # includes -1
crc = struct.pack("<I", zlib.crc32(block) & 0xffffffffL)
uncompressed_length = struct.pack("<I", len(block))
#Fixed 16 bytes,
6 BioSQL/BioSeq.py
@@ -162,7 +162,7 @@ def _retrieve_seq(adaptor, primary_id):
del seq
del given_length
- moltype = moltype.lower() #might be upper case in database
+ moltype = moltype.lower() # might be upper case in database
#We have no way of knowing if these sequences will use IUPAC
#alphabets, and we certainly can't assume they are unambiguous!
if moltype == "dna":
@@ -271,7 +271,7 @@ def _retrieve_features(adaptor, primary_id):
lookup[location_id] = (dbname, v)
feature = SeqFeature.SeqFeature(type = seqfeature_type)
- feature._seqfeature_id = seqfeature_id #Store the key as a private property
+ feature._seqfeature_id = seqfeature_id # Store the key as a private property
feature.qualifiers = qualifiers
if len(locations) == 0:
pass
@@ -400,7 +400,7 @@ def _retrieve_reference(adaptor, primary_id):
#If the start/end are missing, reference.location is an empty list
if (start is not None) or (end is not None):
if start is not None:
- start -= 1 #python counting
+ start -= 1 # python counting
reference.location = [SeqFeature.FeatureLocation(start, end)]
#Don't replace the default "" with None.
if authors:
4 Doc/examples/ACT_example.py
@@ -48,8 +48,8 @@
print "Drawing matches..."
for i, crunch_file in enumerate(comparisons):
- q = genomes[i+1][0] #query file
- s = genomes[i][0] #subject file
+ q = genomes[i+1][0] # query file
+ s = genomes[i][0] # subject file
q_set = feature_sets[q]
s_set = feature_sets[s]
handle = open(crunch_file)
8 Doc/examples/Proux_et_al_2002_Figure_6.py
@@ -81,10 +81,10 @@
B_vs_C = [
(42, "orf39", "lin2581"),
(31, "orf40", "lin2580"),
- (49, "orf41", "lin2579"), #terL
- (54, "orf42", "lin2578"), #portal
- (55, "orf43", "lin2577"), #protease
- (33, "orf44", "lin2576"), #mhp
+ (49, "orf41", "lin2579"), # terL
+ (54, "orf42", "lin2578"), # portal
+ (55, "orf43", "lin2577"), # protease
+ (33, "orf44", "lin2576"), # mhp
(51, "orf46", "lin2575"),
(33, "orf47", "lin2574"),
(40, "orf48", "lin2573"),
6 Scripts/SeqGui/SeqGui.py
@@ -162,20 +162,20 @@ def OnClear(self, event):
self.dest_text.Clear()
def translate(self, codon_table):
- seq = "".join(self.src_text.GetValue().split()) #remove whitespace
+ seq = "".join(self.src_text.GetValue().split()) # remove whitespace
print seq
self.dest_text.Clear()
self.dest_text.SetValue(translate(seq, table=codon_table,
to_stop=True))
def transcribe(self):
- seq = "".join(self.src_text.GetValue().split()) #remove whitespace
+ seq = "".join(self.src_text.GetValue().split()) # remove whitespace
print seq
self.dest_text.Clear()
self.dest_text.SetValue(transcribe(seq))
def back_transcribe(self):
- seq = "".join(self.src_text.GetValue().split()) #remove whitespace
+ seq = "".join(self.src_text.GetValue().split()) # remove whitespace
print seq
self.dest_text.Clear()
self.dest_text.SetValue(back_transcribe(seq))