Distinguish continuation lines from the next logical line (PEP8 E125).

Peter: Merged four of Christian's commits into one as requested
1 parent 1bd24e2 · commit d9500da69edc5b010f012b8e34ee8767d3018a14 · cbrueffer committed with peterjc on Dec 5, 2012
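
For context, PEP8's E125 check flags a continuation line that is indented to the same level as the next logical line (typically the body of the statement), so the continued condition and the body visually run together. Below is a minimal sketch of the kind of rewrite applied throughout this commit, using the names from the first hunk; the exact indentation chosen in the real diff may differ.

    # Before: the continued condition lines up with the body (E125).
    if (first_letter not in skip_items and
        second_letter not in skip_items):
        base_dictionary[(first_letter, second_letter)] = 0

    # After: drop the parentheses and use a backslash continuation with
    # extra indentation, so the condition no longer aligns with the body.
    if first_letter not in skip_items and \
            second_letter not in skip_items:
        base_dictionary[(first_letter, second_letter)] = 0
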
@@ -345,8 +345,8 @@ def _get_base_replacements(self, skip_items = []):
# now create the dictionary
for first_letter in all_letters:
for second_letter in all_letters:
- if (first_letter not in skip_items and
- second_letter not in skip_items):
+ if first_letter not in skip_items and \
+ second_letter not in skip_items:
base_dictionary[(first_letter, second_letter)] = 0
return base_dictionary, skip_items
@@ -192,13 +192,13 @@ def _scan_header(self, uhandle, consumer):
# blastpgp may have a reference for compositional score matrix
# adjustment (see Bug 2502):
if attempt_read_and_call(
- uhandle, consumer.reference, start="Reference"):
+ uhandle, consumer.reference, start="Reference"):
read_and_call_until(uhandle, consumer.reference, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
# blastpgp has a Reference for composition-based statistics.
if attempt_read_and_call(
- uhandle, consumer.reference, start="Reference"):
+ uhandle, consumer.reference, start="Reference"):
read_and_call_until(uhandle, consumer.reference, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
@@ -253,10 +253,10 @@ def _scan_rounds(self, uhandle, consumer):
while not self._eof(uhandle):
line = safe_peekline(uhandle)
- if (not line.startswith('Searching') and
- not line.startswith('Results from round') and
- re.search(r"Score +E", line) is None and
- 'No hits found' not in line):
+ if not line.startswith('Searching') and \
+ not line.startswith('Results from round') and \
+ re.search(r"Score +E", line) is None and \
+ 'No hits found' not in line:
break
self._scan_descriptions(uhandle, consumer)
self._scan_alignments(uhandle, consumer)
@@ -334,8 +334,8 @@ def _scan_descriptions(self, uhandle, consumer):
# indicates that no descriptions follow, and we should go straight
# to the alignments.
if not attempt_read_and_call(
- uhandle, consumer.description_header,
- has_re=re.compile(r'Score +E')):
+ uhandle, consumer.description_header,
+ has_re=re.compile(r'Score +E')):
# Either case 2 or 3. Look for "No hits found".
attempt_read_and_call(uhandle, consumer.no_hits,
contains='No hits found')
@@ -60,8 +60,7 @@ def filter(sum_dict,align_dict,filter_attribute,low_bound, high_bound):
# new_align_dict = copy.copy(align_dict)
for prot_num in sum_dict:
attr_value = getattr(sum_dict[prot_num],filter_attribute)
- if (attr_value >= low_bound and
- attr_value <= high_bound):
+ if attr_value >= low_bound and attr_value <= high_bound:
new_sum_dict[prot_num] = sum_dict[prot_num]
prot_numbers = new_sum_dict.keys()
prot_numbers.sort()
@@ -634,7 +634,7 @@ def __str__(self):
# determine whether we can wrap on spaces
space_wrap = 1
for no_space_key in \
- Bio.GenBank._BaseGenBankConsumer.remove_space_keys:
+ Bio.GenBank._BaseGenBankConsumer.remove_space_keys:
if no_space_key in qualifier.key:
space_wrap = 0
@@ -104,8 +104,8 @@ def _draw_distributions(self, cur_drawing, start_x_pos, x_pos_change,
for y_drawing in range(int(num_y_drawings)):
# if we are on the last y position, we may not be able
# to fill all of the x columns
- if ((y_drawing + 1) * self.number_of_columns >
- len(self.distributions)):
+ if (y_drawing + 1) * self.number_of_columns > \
+ len(self.distributions):
num_x_drawings = len(self.distributions) - \
y_drawing * self.number_of_columns
else:
@@ -340,8 +340,8 @@ def allow_transition(self, from_state, to_state, probability = None,
"State %s was not found in the sequence alphabet" % state
# ensure that the states are not already set
- if ((from_state, to_state) not in self.transition_prob and
- (from_state, to_state) not in self.transition_pseudo):
+ if (from_state, to_state) not in self.transition_prob and \
+ (from_state, to_state) not in self.transition_pseudo:
# set the initial probability
if probability is None:
probability = 0
@@ -42,8 +42,7 @@ def write(self, pattern_list, output_handle):
"""
for pattern in pattern_list:
# deal with signatures, concatentate them with the separator
- if (isinstance(pattern, list) or
- isinstance(pattern, tuple)):
+ if isinstance(pattern, list) or isinstance(pattern, tuple):
string_pattern = self.separator.join(pattern)
# deal with the normal cases
else:
@@ -545,9 +545,9 @@ def matches_schema(pattern, schema, ambiguity_character = '*'):
# check each position, and return a non match if the schema and pattern
# are non ambiguous and don't match
for pos in range(len(pattern)):
- if (schema[pos] != ambiguity_character and
- pattern[pos] != ambiguity_character and
- pattern[pos] != schema[pos]):
+ if schema[pos] != ambiguity_character and \
+ pattern[pos] != ambiguity_character and \
+ pattern[pos] != schema[pos]:
return 0
@@ -266,8 +266,8 @@ def resid2code(res_id):
res_seq_icode = resid2code(res_id)
for r in chain:
if r.id[0] not in (' ', 'W'):
- if (resid2code(r.id) == res_seq_icode and
- r.get_list()[0].get_altloc() in tuple('A1 ')):
+ if resid2code(r.id) == res_seq_icode and \
+ r.get_list()[0].get_altloc() in tuple('A1 '):
res = r
break
@@ -66,7 +66,7 @@ def __init__(self, use_model_flag=0):
# private mathods
def _get_atom_line(self, atom, hetfield, segid, atom_number, resname,
- resseq, icode, chain_id, charge=" "):
+ resseq, icode, chain_id, charge=" "):
"""Returns an ATOM PDB string (PRIVATE)."""
if hetfield!=" ":
record_type="HETATM"
@@ -99,7 +99,7 @@ def _nice_case(line):
c=c.upper()
nextCap=0
elif c==' ' or c=='.' or c==',' or c==';' or c==':' or c=='\t' or\
- c=='-' or c=='_':
+ c=='-' or c=='_':
nextCap=1
s+=c
i+=1
@@ -75,7 +75,7 @@ class SplitFDist(object):
simulation.
"""
def __init__(self, report_fun = None,
- num_thr = 2, split_size = 1000, fdist_dir = '', ext = None):
+ num_thr = 2, split_size = 1000, fdist_dir = '', ext = None):
"""Constructor.
Parameters:
@@ -145,9 +145,9 @@ def release(self):
#You can only run a fdist case at a time
def run_fdist(self, npops, nsamples, fst, sample_size,
- mut = 0, num_sims = 20000, data_dir='.',
- is_dominant = False, theta = 0.06, beta = (0.25, 0.25),
- max_freq = 0.99):
+ mut = 0, num_sims = 20000, data_dir='.',
+ is_dominant = False, theta = 0.06, beta = (0.25, 0.25),
+ max_freq = 0.99):
"""Runs FDist.
Parameters can be seen on FDistController.run_fdist.
@@ -70,7 +70,7 @@ def _get_temp_file(self):
return strftime("%H%M%S") + str(int(clock()*100)) + str(randint(0,1000)) + str(self.tmp_idx)
def run_datacal(self, data_dir='.', version=1,
- crit_freq = 0.99, p = 0.5, beta= (0.25, 0.25)):
+ crit_freq = 0.99, p = 0.5, beta= (0.25, 0.25)):
"""Executes datacal.
data_dir - Where the data is found.
@@ -124,9 +124,9 @@ def _generate_intfile(self, data_dir):
inf.close()
def run_fdist(self, npops, nsamples, fst, sample_size,
- mut = 0, num_sims = 50000, data_dir='.',
- is_dominant = False, theta = 0.06, beta = (0.25, 0.25),
- max_freq = 0.99):
+ mut = 0, num_sims = 50000, data_dir='.',
+ is_dominant = False, theta = 0.06, beta = (0.25, 0.25),
+ max_freq = 0.99):
"""Executes (d)fdist.
Parameters:
@@ -198,10 +198,10 @@ def run_fdist(self, npops, nsamples, fst, sample_size,
return fst
def run_fdist_force_fst(self, npops, nsamples, fst, sample_size,
- mut = 0, num_sims = 50000, data_dir='.',
- try_runs = 5000, limit=0.001,
- is_dominant = False, theta = 0.06, beta = (0.25, 0.25),
- max_freq = 0.99):
+ mut = 0, num_sims = 50000, data_dir='.',
+ try_runs = 5000, limit=0.001,is_dominant = False,
+ theta = 0.06, beta = (0.25, 0.25),
+ max_freq = 0.99):
"""Executes fdist trying to force Fst.
Parameters:
@@ -258,7 +258,8 @@ def _run_genepop(self, extensions, option, fname, opts={}):
return
def _test_pop_hz_both(self, fname, type, ext, enum_test = True,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20,
+ iterations = 5000):
"""Hardy-Weinberg test for heterozygote deficiency/excess.
Returns a population iterator containg
@@ -276,7 +277,8 @@ def hw_func(self):
return _FileIterator(hw_func, f, fname + ext)
def _test_global_hz_both(self, fname, type, ext, enum_test = True,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20,
+ iterations = 5000):
"""Global Hardy-Weinberg test for heterozygote deficiency/excess.
Returns a triple with:
@@ -323,7 +325,8 @@ def hw_pop_func(self):
#1.1
def test_pop_hz_deficiency(self, fname, enum_test = True,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20,
+ iterations = 5000):
"""Hardy-Weinberg test for heterozygote deficiency.
Returns a population iterator containg
@@ -336,7 +339,8 @@ def test_pop_hz_deficiency(self, fname, enum_test = True,
#1.2
def test_pop_hz_excess(self, fname, enum_test = True,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20,
+ iterations = 5000):
"""Hardy-Weinberg test for heterozygote deficiency.
Returns a population iterator containg
@@ -349,7 +353,8 @@ def test_pop_hz_excess(self, fname, enum_test = True,
#1.3 P file
def test_pop_hz_prob(self, fname, ext, enum_test = False,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20,
+ iterations = 5000):
"""Hardy-Weinberg test based on probability.
Returns 2 iterators and a final tuple:
@@ -382,7 +387,8 @@ def hw_prob_pop_func(self):
#1.4
def test_global_hz_deficiency(self, fname, enum_test = True,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20,
+ iterations = 5000):
"""Global Hardy-Weinberg test for heterozygote deficiency.
Returns a triple with:
@@ -401,7 +407,8 @@ def test_global_hz_deficiency(self, fname, enum_test = True,
#1.5
def test_global_hz_excess(self, fname, enum_test = True,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20,
+ iterations = 5000):
"""Global Hardy-Weinberg test for heterozygote excess.
Returns a triple with:
@@ -420,7 +427,7 @@ def test_global_hz_excess(self, fname, enum_test = True,
#2.1
def test_ld(self, fname,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20, iterations = 5000):
opts = self._get_opts(dememorization, batches, iterations)
self._run_genepop([".DIS"], [2, 1], fname, opts)
@@ -474,22 +481,22 @@ def create_contingency_tables(self, fname):
#3.1 PR/GE files
def test_genic_diff_all(self, fname,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20, iterations = 5000):
raise NotImplementedError
#3.2 PR2/GE2 files
def test_genic_diff_pair(self, fname,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20, iterations = 5000):
raise NotImplementedError
#3.3 G files
def test_genotypic_diff_all(self, fname,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20, iterations = 5000):
raise NotImplementedError
#3.4 2G2 files
def test_genotypic_diff_pair(self, fname,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20, iterations = 5000):
raise NotImplementedError
#4
@@ -45,7 +45,7 @@ def test_hw_pop(self, pop_pos, test_type = "probability"):
return hw_res.next()
def test_hw_global(self, test_type = "deficiency", enum_test = True,
- dememorization = 10000, batches = 20, iterations = 5000):
+ dememorization = 10000, batches = 20, iterations = 5000):
if test_type=="deficiency":
pop_res, loc_res, all = self._controller.test_global_hz_deficiency(self._fname,
enum_test, dememorization, batches, iterations)
@@ -54,8 +54,8 @@ def test_hw_global(self, test_type = "deficiency", enum_test = True,
enum_test, dememorization, batches, iterations)
return list(pop_res), list(loc_res), all
- def test_ld_all_pair(self, locus1, locus2,
- dememorization = 10000, batches = 20, iterations = 5000):
+ def test_ld_all_pair(self, locus1, locus2, dememorization = 10000,
+ batches = 20, iterations = 5000):
all_ld = self._controller.test_ld(self._fname, dememorization, batches, iterations)[1]
for ld_case in all_ld:
(l1, l2), result = ld_case
@@ -125,7 +125,7 @@ def process_text(in_string, out_file_prefix, para_list, curr_values,
#sep is because of jython
def generate_model(par_stream, out_prefix, params,
- specific_processor = no_processor, out_dir = '.'):
+ specific_processor = no_processor, out_dir = '.'):
#prepare_dir()
text = par_stream.read()
out_file_prefix = sep.join([out_dir, out_prefix])
@@ -173,8 +173,8 @@ def _create_hsp(hid, qid, hspd):
continue
# try to set frame if there are translation in the alignment
if len(frag.aln_annotation) > 1 or \
- frag.query_strand == 0 or \
- ('vulgar_comp' in hspd and re.search(_RE_TRANS, hspd['vulgar_comp'])):
+ frag.query_strand == 0 or \
+ ('vulgar_comp' in hspd and re.search(_RE_TRANS, hspd['vulgar_comp'])):
_set_frame(frag)
frags.append(frag)
@@ -242,9 +242,9 @@ def _create_hits(self, hit_attrs, qid):
# break out of hsp parsing if there are no hits, it's the last hsp
# or it's the start of a new hit
if self.line.startswith(' [No targets detected that satisfy') or \
- self.line.startswith('Internal pipeline statistics summary:') or \
- self.line.startswith(' Alignments for each domain:') or \
- self.line.startswith('>>'):
+ self.line.startswith('Internal pipeline statistics summary:') or \
+ self.line.startswith(' Alignments for each domain:') or \
+ self.line.startswith('>>'):
hit_attr = hit_attrs.pop(0)
hit = Hit(hsp_list)
@@ -158,8 +158,8 @@ def end_tr( self ):
if( text[ 0 ] == ':' ):
text = text[ 1: ]
text = string.join( string.split( text ) )
- if( ( self.context == 'general_info' ) or \
- ( self.context == 'seq_info' ) ):
+ if self.context == 'general_info' or \
+ self.context == 'seq_info':
try:
contents = self.queue[ self.master_key ][ self.key_waiting ]
if isinstance(contents, list):