
Fix whitespace before and after operators (PEP8 E221 and E222).

Whitespace was left intact in places where it helps readability.
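For context, E221 flags multiple spaces before an operator and E222 flags multiple spaces after one. The snippet below is a minimal, hypothetical illustration of the kind of change made throughout this commit; the variable names are invented and do not appear in the diff.

# Hypothetical example, not taken from this commit.
# Before the cleanup, extra spaces were used to line the assignments up:
n_rows   = 10        # E221: multiple spaces before the '=' operator
n_cols =   20        # E222: multiple spaces after the '=' operator

# After the cleanup, a single space surrounds each operator:
n_rows = 10
n_cols = 20

A style checker such as pep8 (since renamed pycodestyle) can report just these two codes with its --select option, for example pep8 --select=E221,E222 Bio/. As the message notes, alignment that genuinely helps readability was deliberately kept.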
cbrueffer authored and peterjc committed Dec 4, 2012
1 parent 332494c commit 5192d13e01ff304f061f755e01a27d4e07b14a9a
Showing with 136 additions and 136 deletions.
  1. +3 −3 Bio/Affy/CelFile.py
  2. +1 −1 Bio/Align/AlignInfo.py
  3. +2 −2 Bio/AlignIO/FastaIO.py
  4. +3 −3 Bio/Alphabet/Reduced.py
  5. +2 −2 Bio/Crystal/__init__.py
  6. +2 −2 Bio/FSSP/__init__.py
  7. +3 −3 Bio/GA/Crossover/GeneralPoint.py
  8. +1 −1 Bio/GA/Crossover/Uniform.py
  9. +1 −1 Bio/GenBank/Record.py
  10. +1 −1 Bio/Geo/Record.py
  11. +1 −1 Bio/Graphics/GenomeDiagram/_GraphSet.py
  12. +2 −2 Bio/HMM/Trainer.py
  13. +1 −1 Bio/KEGG/Compound/__init__.py
  14. +1 −1 Bio/KEGG/Enzyme/__init__.py
  15. +2 −2 Bio/KEGG/__init__.py
  16. +1 −1 Bio/NMR/xpktools.py
  17. +1 −1 Bio/NaiveBayes.py
  18. +3 −3 Bio/Pathway/__init__.py
  19. +1 −1 Bio/PopGen/FDist/Async.py
  20. +1 −1 Bio/PopGen/FDist/Controller.py
  21. +4 −4 Bio/PopGen/GenePop/Controller.py
  22. +1 −1 Bio/PopGen/GenePop/EasyController.py
  23. +4 −4 Bio/PopGen/GenePop/FileParser.py
  24. +7 −7 Bio/PopGen/GenePop/LargeFileParser.py
  25. +5 −5 Bio/PopGen/GenePop/__init__.py
  26. +1 −1 Bio/PopGen/SimCoal/Template.py
  27. +1 −1 Bio/Restriction/PrintFormat.py
  28. +7 −7 Bio/Restriction/Restriction.py
  29. +2 −2 Bio/SCOP/Raf.py
  30. +1 −1 Bio/SCOP/__init__.py
  31. +1 −1 Bio/SearchIO/ExonerateIO/_base.py
  32. +1 −1 Bio/SearchIO/FastaIO.py
  33. +2 −2 Bio/SearchIO/_model/hsp.py
  34. +6 −6 Bio/SearchIO/_model/query.py
  35. +2 −2 Bio/Seq.py
  36. +1 −1 Bio/SeqUtils/CheckSum.py
  37. +3 −3 Bio/SeqUtils/__init__.py
  38. +1 −1 Bio/SubsMat/MatrixInfo.py
  39. +1 −1 Bio/SwissProt/__init__.py
  40. +1 −1 BioSQL/Loader.py
  41. +1 −1 Scripts/PDB/generate_three_to_one_dict.py
  42. +1 −1 Scripts/SeqGui/SeqGui.py
  43. +1 −1 Scripts/xbbtools/xbb_search.py
  44. +2 −2 Scripts/xbbtools/xbb_translations.py
  45. +2 −2 Tests/test_AlignIO.py
  46. +2 −2 Tests/test_AlignIO_FastaIO.py
  47. +25 −25 Tests/test_Cluster.py
  48. +2 −2 Tests/test_EmbossPhylipNew.py
  49. +2 −2 Tests/test_GACrossover.py
  50. +1 −1 Tests/test_GenomeDiagram.py
  51. +1 −1 Tests/test_Mafft_tool.py
  52. +3 −3 Tests/test_Muscle_tool.py
  53. +1 −1 Tests/test_PDB.py
  54. +2 −2 Tests/test_SeqIO.py
  55. +4 −4 Tests/test_TogoWS.py
  56. +1 −1 Tests/test_geo.py
Bio/Affy/CelFile.py
@@ -42,7 +42,7 @@ def read(handle):
section = "HEADER"
elif line[:11]=="[INTENSITY]":
section = "INTENSITY"
- record.intensities = numpy.zeros((record.nrows, record.ncols))
+ record.intensities = numpy.zeros((record.nrows, record.ncols))
record.stdevs = numpy.zeros((record.nrows, record.ncols))
record.npix = numpy.zeros((record.nrows, record.ncols), int)
elif line[0]=="[":
@@ -58,7 +58,7 @@ def read(handle):
continue
words = line.split()
y, x = map(int, words[:2])
- record.intensities[x,y] = float(words[2])
+ record.intensities[x,y] = float(words[2])
record.stdevs[x,y] = float(words[3])
- record.npix[x,y] = int(words[4])
+ record.npix[x,y] = int(words[4])
return record
Bio/Align/AlignInfo.py
@@ -179,7 +179,7 @@ def _guess_consensus_alphabet(self, ambiguous):
#Now check its compatible with all the rest of the sequences
for record in self.alignment:
#Get the (un-gapped version of) the sequence's alphabet
- alt = Alphabet._get_base_alphabet(record.seq.alphabet)
+ alt = Alphabet._get_base_alphabet(record.seq.alphabet)
if not isinstance(alt, a.__class__):
raise ValueError("Alignment contains a sequence with \
an incompatible alphabet.")
Bio/AlignIO/FastaIO.py
@@ -45,13 +45,13 @@ def _extract_alignment_region(alignment_seq_with_flanking, annotation):
if int(annotation['al_start']) <= int(annotation['al_stop']):
start = int(annotation['al_start']) \
- display_start
- end = int(annotation['al_stop']) \
+ end = int(annotation['al_stop']) \
- display_start + 1
else:
#FASTA has flipped this sequence...
start = display_start \
- int(annotation['al_start'])
- end = display_start \
+ end = display_start \
- int(annotation['al_stop']) + 1
end += align_stripped.count("-")
assert 0 <= start and start < end and end <= len(align_stripped), \
Bio/Alphabet/Reduced.py
@@ -79,7 +79,7 @@ class Murphy10(Alphabet.ProteinAlphabet):
size = 10
murphy_10 = Murphy10()
-murphy_8_tab = {"L": "L",
+murphy_8_tab = {"L": "L",
"V": "L",
"I": "L",
"M": "L",
@@ -105,7 +105,7 @@ class Murphy8(Alphabet.ProteinAlphabet):
size = 8
murphy_8 = Murphy8()
-murphy_4_tab = {"L": "L",
+murphy_4_tab = {"L": "L",
"V": "L",
"I": "L",
"M": "L",
@@ -157,7 +157,7 @@ class HPModel(Alphabet.ProteinAlphabet):
size = 2
hp_model = HPModel()
-pc_5_table = {"I": "A", # Aliphatic
+pc_5_table = {"I": "A", # Aliphatic
"V": "A",
"L": "A",
"F": "R", # Aromatic
Bio/Crystal/__init__.py
@@ -231,15 +231,15 @@ def __repr__(self):
keys = self.data.keys()
keys.sort()
for key in keys:
- output = output + '%s : %s\n' % (key, self.data[ key ])
+ output = output + '%s : %s\n' % (key, self.data[ key ])
return output
def __str__(self):
output = ''
keys = self.data.keys()
keys.sort()
for key in keys:
- output = output + '%s : %s\n' % (key, self.data[ key ])
+ output = output + '%s : %s\n' % (key, self.data[ key ])
return output
def tostring(self):
Bio/FSSP/__init__.py
@@ -80,7 +80,7 @@ def __repr__(self):
outstring = self.aa+self.ss.lower()
return outstring
- __str__ = __repr__
+ __str__ = __repr__
@@ -129,7 +129,7 @@ def __init__(self,in_fff_rec):
# print in_fff_rec
self.abs_res_num = int(in_fff_rec[fssp_rec.align.abs_res_num])
self.pdb_res_num = in_fff_rec[fssp_rec.align.pdb_res_num].strip()
- self.chain_id = in_fff_rec[fssp_rec.align.chain_id]
+ self.chain_id = in_fff_rec[fssp_rec.align.chain_id]
if self.chain_id == ' ':
self.chain_id = '0'
self.res_name = in_fff_rec[fssp_rec.align.res_name]
Bio/GA/Crossover/GeneralPoint.py
@@ -49,7 +49,7 @@ def __init__(self, points, crossover_prob = .1):
"""
self._crossover_prob = crossover_prob
- self._sym = points % 2 # odd n, gets a symmetry flag
+ self._sym = points % 2 # odd n, gets a symmetry flag
self._npoints = (points + self._sym)//2 # (N or N+1)//2
def do_crossover(self, org_1, org_2):
@@ -62,7 +62,7 @@ def do_crossover(self, org_1, org_2):
if crossover_chance <= self._crossover_prob:
# pre-compute bounds (len(genome))
- bound = (len(new_org[0].genome), len(new_org[1].genome))
+ bound = (len(new_org[0].genome), len(new_org[1].genome))
mbound = min(bound)
# can't have more than 0,x_0...x_n,bound locations
@@ -82,7 +82,7 @@ def do_crossover(self, org_1, org_2):
y_locs = self._generate_locs( bound[1] )
else:
y_locs = x_locs
- xlocs = self._generate_locs( bound[0] )
+ xlocs = self._generate_locs( bound[0] )
# copy new genome strings over
tmp = self._crossover(0, new_org, (x_locs,y_locs))
Bio/GA/Crossover/Uniform.py
@@ -27,7 +27,7 @@ def __init__(self, crossover_prob = .1, uniform_prob = 0.7):
"""Initialize to do uniform crossover at the specified probability and frequency.
"""
self._crossover_prob = crossover_prob
- self._uniform_prob = uniform_prob
+ self._uniform_prob = uniform_prob
return
def do_crossover(self, org_1, org_2):
Bio/GenBank/Record.py
@@ -334,7 +334,7 @@ def _keywords_line(self):
"""
output = ""
if len(self.keywords) >= 0:
- output += Record.BASE_FORMAT % "KEYWORDS"
+ output += Record.BASE_FORMAT % "KEYWORDS"
keyword_info = ""
for keyword in self.keywords:
keyword_info += "%s; " % keyword
Bio/Geo/Record.py
@@ -84,6 +84,6 @@ def __str__( self ):
def out_block( text, prefix = '' ):
output = ''
for j in range( 0, len( text ), 80 ):
- output = output + '%s%s\n' % ( prefix, text[ j: j + 80 ] )
+ output = output + '%s%s\n' % ( prefix, text[ j: j + 80 ] )
output = output + '\n'
return output
Bio/Graphics/GenomeDiagram/_GraphSet.py
@@ -122,7 +122,7 @@ def new_graph(self, data, name=None, style='bar', color=colors.lightgreen,
id = self._next_id # get id number
graph = GraphData(id, data, name, style, color, altcolor, center)
graph.linewidth = linewidth
- self._graphs[id] = graph # add graph data
+ self._graphs[id] = graph # add graph data
self._next_id += 1 # increment next id
return graph
Bio/HMM/Trainer.py
@@ -199,7 +199,7 @@ def train(self, training_seqs, stopping_criteria,
# calculate the forward and backward variables
DP = dp_method(self._markov_model, training_seq)
forward_var, seq_prob = DP.forward_algorithm()
- backward_var = DP.backward_algorithm()
+ backward_var = DP.backward_algorithm()
all_probabilities.append(seq_prob)
@@ -221,7 +221,7 @@ def train(self, training_seqs, stopping_criteria,
self._markov_model.transition_prob = ml_transitions
self._markov_model.emission_prob = ml_emissions
- cur_log_likelihood = self.log_likelihood(all_probabilities)
+ cur_log_likelihood = self.log_likelihood(all_probabilities)
# if we have previously calculated the log likelihood (ie.
# not the first round), see if we can finish
Bio/KEGG/Compound/__init__.py
@@ -61,7 +61,7 @@ def __str__(self):
Returns a string representation of this Record.
"""
return self._entry() + \
- self._name() + \
+ self._name() + \
self._formula() + \
self._mass() + \
self._pathway() + \
Bio/KEGG/Enzyme/__init__.py
@@ -80,7 +80,7 @@ def __str__(self):
Returns a string representation of this Record.
"""
return self._entry() + \
- self._name() + \
+ self._name() + \
self._classname() + \
self._sysname() + \
self._reaction() + \
Bio/KEGG/__init__.py
@@ -33,9 +33,9 @@ def _wrap_kegg(line, max_width = KEGG_DATA_LENGTH, wrap_rule = _default_wrap):
"""
s = ""
wrapped_line = ""
- indent = " " * wrap_rule[0]
+ indent = " " * wrap_rule[0]
connect = wrap_rule[1]
- rules = wrap_rule[2:]
+ rules = wrap_rule[2:]
while 1:
if len(line) <= max_width:
wrapped_line = wrapped_line + line
Bio/NMR/xpktools.py
@@ -26,7 +26,7 @@ class XpkEntry(object):
def __init__(self,entry,headline):
self.fields={} # Holds all fields from input line in a dictionary
# keys are data labels from the .xpk header
- datlist = entry.split()
+ datlist = entry.split()
headlist = headline.split()
i=0
Bio/NaiveBayes.py
@@ -72,7 +72,7 @@ def calculate(nb, observation, scale=0):
% (len(observation), nb.dimensionality))
# Calculate log P(observation|class) for every class.
- n = len(nb.classes)
+ n = len(nb.classes)
lp_observation_class = numpy.zeros(n) # array of log P(observation|class)
for i in range(n):
# log P(observation|class) = SUM_i log P(observation_i|class)
Bio/Pathway/__init__.py
@@ -75,8 +75,8 @@ def __init__(self, reactants = {}, catalysts = [],
for r, value in reactants.iteritems():
if value == 0:
del self.reactants[r]
- self.catalysts = sorted(set(catalysts))
- self.data = data
+ self.catalysts = sorted(set(catalysts))
+ self.data = data
self.reversible = reversible
def __eq__(self, r):
@@ -107,7 +107,7 @@ def __repr__(self):
def __str__(self):
"""Returns a string representation of self."""
substrates = ""
- products = ""
+ products = ""
all_species = sorted(self.reactants)
for species in all_species:
stoch = self.reactants[species]
Bio/PopGen/FDist/Async.py
@@ -106,7 +106,7 @@ def monitor(self):
while(True):
sleep(1)
self.async.access_ds.acquire()
- keys = self.async.done.keys()[:]
+ keys = self.async.done.keys()[:]
self.async.access_ds.release()
for done in keys:
self.async.access_ds.acquire()
Bio/PopGen/FDist/Controller.py
@@ -269,7 +269,7 @@ def run_cplot(self, ci= 0.95, data_dir='.', version = 1, smooth=0.04):
cplot_name = "cplot"
else:
cplot_name = "cplot2"
- os.system('cd ' + data_dir + ' && ' +
+ os.system('cd ' + data_dir + ' && ' +
self._get_path(cplot_name) + ' < ' + in_name + ' > ' + out_name)
os.remove(data_dir + os.sep + in_name)
os.remove(data_dir + os.sep + out_name)
Bio/PopGen/GenePop/Controller.py
@@ -609,13 +609,13 @@ def pop_parser(self):
while "Expected number of ho" not in l:
l = self.stream.readline()
- expHo = _gp_float(l[38:])
+ expHo = _gp_float(l[38:])
l = self.stream.readline()
- obsHo = _gp_int(l[38:])
+ obsHo = _gp_int(l[38:])
l = self.stream.readline()
- expHe = _gp_float(l[38:])
+ expHe = _gp_float(l[38:])
l = self.stream.readline()
- obsHe = _gp_int(l[38:])
+ obsHe = _gp_int(l[38:])
l = self.stream.readline()
while "Sample count" not in l:
Bio/PopGen/GenePop/EasyController.py
@@ -157,7 +157,7 @@ def get_f_stats(self, locus_name):
Returns Fis(CW), Fst, Fit, Qintra, Qinter
"""
- loci_iter = self._controller.calc_fst_all(self._fname)[1]
+ loci_iter = self._controller.calc_fst_all(self._fname)[1]
for name, fis, fst, fit, qintra, qinter in loci_iter:
if name == locus_name:
return fis, fst, fit, qintra, qinter
Bio/PopGen/GenePop/FileParser.py
@@ -62,9 +62,9 @@ class FileRecord(object):
"""
def __init__(self, fname):
- self.comment_line = ""
- self.loci_list = []
- self.fname = fname
+ self.comment_line = ""
+ self.loci_list = []
+ self.fname = fname
self.start_read()
def __str__(self):
@@ -74,7 +74,7 @@ def __str__(self):
Marker length will be 3.
"""
marker_len = 3
- rep = [self.comment_line + '\n']
+ rep = [self.comment_line + '\n']
rep.append('\n'.join(self.loci_list) + '\n')
current_pop = self.current_pop
current_ind = self.current_ind
Bio/PopGen/GenePop/LargeFileParser.py
@@ -86,13 +86,13 @@ class Record(object):
"""
def __init__(self, handle):
- self.handle = handle
- self.marker_len = 0
- self.comment_line = ""
- self.loci_list = []
- self.populations = []
- self.data_generator = None
- self.stack = []
+ self.handle = handle
+ self.marker_len = 0
+ self.comment_line = ""
+ self.loci_list = []
+ self.populations = []
+ self.data_generator = None
+ self.stack = []
def data_generator(self):
for handle in [self.stack, self.handle]:
Bio/PopGen/GenePop/__init__.py
@@ -121,11 +121,11 @@ class Record(object):
"""
def __init__(self):
- self.marker_len = 0
- self.comment_line = ""
- self.loci_list = []
- self.pop_list = []
- self.populations = []
+ self.marker_len = 0
+ self.comment_line = ""
+ self.loci_list = []
+ self.pop_list = []
+ self.populations = []
def __str__(self):
"""Returns (reconstructs) a GenePop textual representation.
Bio/PopGen/SimCoal/Template.py
@@ -37,7 +37,7 @@ def process_para(in_string, out_file_prefix, para_list, curr_values):
f = open(f_name + '.par', 'w')
#executed_template = template
executed_template = exec_template(template)
- clean_template = executed_template.replace('\r\n','\n').replace('\n\n','\n')
+ clean_template = executed_template.replace('\r\n','\n').replace('\n\n','\n')
f.write(clean_template)
f.close()
return [f_name]
Bio/Restriction/PrintFormat.py
@@ -260,7 +260,7 @@ def _make_number_only(self, ls, title, nc = [], s1 =''):
return title
ls.sort(lambda x,y : cmp(len(x[1]), len(y[1])))
iterator = iter(ls)
- cur_len = 1
+ cur_len = 1
new_sect = []
for name, sites in iterator:
l = len(sites)
