
KDTree/Medline/NMR/NeuralNetwork: Some more PEP8 whitespace cleanup.

commit ee9fc123a0d183bed9bf384fb1323bb9b6deb43d (1 parent: b28d709)
Authored by Christian Brueffer on December 11, 2012; committed by peterjc on December 18, 2012.
Bio/KDTree/KDTree.py (84 changes)

@@ -21,8 +21,8 @@


def _dist(p, q):
-    diff=p-q
-    return sqrt(sum(diff*diff))
+    diff = p - q
+    return sqrt(sum(diff * diff))


def _neighbor_test(nr_points, dim, bucket_size, radius):
@@ -37,23 +37,23 @@ def _neighbor_test(nr_points, dim, bucket_size, radius):
    o radius - radius of search (typically 0.05 or so)
    """
    # KD tree search
-    kdt=_CKDTree.KDTree(dim, bucket_size)
-    coords=random((nr_points, dim))
+    kdt = _CKDTree.KDTree(dim, bucket_size)
+    coords = random((nr_points, dim))
    kdt.set_data(coords)
    neighbors = kdt.neighbor_search(radius)
    r = [neighbor.radius for neighbor in neighbors]
    if r is None:
-        l1=0
+        l1 = 0
    else:
-        l1=len(r)
+        l1 = len(r)
    # now do a slow search to compare results
    neighbors = kdt.neighbor_simple_search(radius)
    r = [neighbor.radius for neighbor in neighbors]
    if r is None:
-        l2=0
+        l2 = 0
    else:
-        l2=len(r)
-    if l1==l2:
+        l2 = len(r)
+    if l1 == l2:
        print "Passed."
    else:
        print "Not passed: %i != %i." % (l1, l2)
@@ -70,23 +70,23 @@ def _test(nr_points, dim, bucket_size, radius):
    o radius - radius of search (typically 0.05 or so)
    """
    # kd tree search
-    kdt=_CKDTree.KDTree(dim, bucket_size)
-    coords=random((nr_points, dim))
-    center=coords[0]
+    kdt = _CKDTree.KDTree(dim, bucket_size)
+    coords = random((nr_points, dim))
+    center = coords[0]
    kdt.set_data(coords)
    kdt.search_center_radius(center, radius)
-    r=kdt.get_indices()
+    r = kdt.get_indices()
    if r is None:
-        l1=0
+        l1 = 0
    else:
-        l1=len(r)
-    l2=0
+        l1 = len(r)
+    l2 = 0
    # now do a manual search to compare results
    for i in range(0, nr_points):
-        p=coords[i]
-        if _dist(p, center)<=radius:
-            l2=l2+1
-    if l1==l2:
+        p = coords[i]
+        if _dist(p, center) <= radius:
+            l2 = l2 + 1
+    if l1 == l2:
        print "Passed."
    else:
        print "Not passed: %i != %i." % (l1, l2)
@@ -126,9 +126,9 @@ class KDTree(object):
    """

    def __init__(self, dim, bucket_size=1):
-        self.dim=dim
-        self.kdt=_CKDTree.KDTree(dim, bucket_size)
-        self.built=0
+        self.dim = dim
+        self.kdt = _CKDTree.KDTree(dim, bucket_size)
+        self.built = 0

    # Set data

@@ -139,12 +139,12 @@ def set_coords(self, coords):
        have dimensionality D and there are N points, the coords
        array should be NxD dimensional.
        """
-        if coords.min()<=-1e6 or coords.max()>=1e6:
+        if coords.min() <= -1e6 or coords.max() >= 1e6:
                raise Exception("Points should lie between -1e6 and 1e6")
-        if len(coords.shape)!=2 or coords.shape[1]!=self.dim:
+        if len(coords.shape) != 2 or coords.shape[1] != self.dim:
                raise Exception("Expected a Nx%i NumPy array" % self.dim)
        self.kdt.set_data(coords)
-        self.built=1
+        self.built = 1

    # Fixed radius search for a point

@@ -157,7 +157,7 @@ def search(self, center, radius):
        """
        if not self.built:
                raise Exception("No point set specified")
-        if center.shape!=(self.dim,):
+        if center.shape != (self.dim,):
                raise Exception("Expected a %i-dimensional NumPy array"
                                % self.dim)
        self.kdt.search_center_radius(center, radius)
@@ -168,7 +168,7 @@ def get_radii(self):
        Return the list of distances from center after
        a neighbor search.
        """
-        a=self.kdt.get_radii()
+        a = self.kdt.get_radii()
        if a is None:
            return []
        return a
@@ -182,7 +182,7 @@ def get_indices(self):

        For an index pair, the first index<second index.
        """
-        a=self.kdt.get_indices()
+        a = self.kdt.get_indices()
        if a is None:
            return []
        return a
@@ -219,16 +219,16 @@ def all_get_radii(self):
        """
        return [neighbor.radius for neighbor in self.neighbors]

-if __name__=="__main__":
+if __name__ == "__main__":

-    nr_points=100000
-    dim=3
-    bucket_size=10
-    query_radius=10
+    nr_points = 100000
+    dim = 3
+    bucket_size = 10
+    query_radius = 10

-    coords=(200*random((nr_points, dim)))
+    coords = (200 * random((nr_points, dim)))

-    kdtree=KDTree(dim, bucket_size)
+    kdtree = KDTree(dim, bucket_size)

    # enter coords
    kdtree.set_coords(coords)
@@ -242,8 +242,8 @@ def all_get_radii(self):
    # indices is a list of tuples. Each tuple contains the
    # two indices of a point pair within query_radius of
    # each other.
-    indices=kdtree.all_get_indices()
-    radii=kdtree.all_get_radii()
+    indices = kdtree.all_get_indices()
+    radii = kdtree.all_get_radii()

    print "Found %i point pairs within radius %f." % (len(indices), query_radius)

@@ -251,14 +251,14 @@ def all_get_radii(self):

    for i in range(0, 10):
        # pick a random center
-        center=random(dim)
+        center = random(dim)

        # search neighbors
        kdtree.search(center, query_radius)

        # get indices & radii of points
-        indices=kdtree.get_indices()
-        radii=kdtree.get_radii()
+        indices = kdtree.get_indices()
+        radii = kdtree.get_radii()

-        x, y, z=center
+        x, y, z = center
        print "Found %i points in radius %f around center (%.2f, %.2f, %.2f)." % (len(indices), query_radius, x, y, z)
Bio/Medline/__init__.py (2 changes)

@@ -127,7 +127,7 @@ def parse(handle):
    record = Record()
    finished = False
    while not finished:
-        if line[:6]=="      ": # continuation line
+        if line[:6] == "      ":  # continuation line
            record[key].append(line[6:])
        elif line:
            key = line[:4].rstrip()
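The hunk above is the continuation-line branch of Medline.parse: lines starting with six spaces are folded into the value list of the current key. In practice the parser is driven like this (a minimal sketch; pubmed_result.txt is a placeholder for any file of MEDLINE-format records):

from Bio import Medline

handle = open("pubmed_result.txt")
for record in Medline.parse(handle):
    # Each record is a dict-like object keyed by MEDLINE tags ("PMID", "TI", "AU", ...)
    print record.get("PMID", "?"), record.get("TI", "<no title>")
handle.close()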
Bio/NMR/NOEtools.py (39 changes)

@@ -9,7 +9,7 @@
import xpktools


-def predictNOE(peaklist,originNuc,detectedNuc,originResNum,toResNum):
+def predictNOE(peaklist, originNuc, detectedNuc, originResNum, toResNum):
# Predict the i->j NOE position based on self peak (diagonal) assignments
#
# example predictNOE(peaklist,"N15","H1",10,12)
@@ -23,30 +23,29 @@ def predictNOE(peaklist,originNuc,detectedNuc,originResNum,toResNum):
#       assumption holds true.  Check your peaklist for errors and
#       off diagonal peaks before attempting to use predictNOE.

-    returnLine = "" # The modified line to be returned to the caller
+    returnLine = ""  # The modified line to be returned to the caller

    datamap = _data_map(peaklist.datalabels)

    # Construct labels for keying into dictionary
-    originAssCol = datamap[originNuc+".L"]+1
-    originPPMCol = datamap[originNuc+".P"]+1
-    detectedPPMCol = datamap[detectedNuc+".P"]+1
+    originAssCol = datamap[originNuc + ".L"] + 1
+    originPPMCol = datamap[originNuc + ".P"] + 1
+    detectedPPMCol = datamap[detectedNuc + ".P"] + 1

    # Make a list of the data lines involving the detected
    if str(toResNum) in peaklist.residue_dict(detectedNuc) \
    and str(originResNum) in peaklist.residue_dict(detectedNuc):
-        detectedList=peaklist.residue_dict(detectedNuc)[str(toResNum)]
-        originList=peaklist.residue_dict(detectedNuc)[str(originResNum)]
-        returnLine=detectedList[0]
+        detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
+        originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
+        returnLine = detectedList[0]

        for line in detectedList:
-
-            aveDetectedPPM = _col_ave(detectedList,detectedPPMCol)
-            aveOriginPPM = _col_ave(originList,originPPMCol)
+            aveDetectedPPM = _col_ave(detectedList, detectedPPMCol)
+            aveOriginPPM = _col_ave(originList, originPPMCol)
            originAss = originList[0].split()[originAssCol]

-        returnLine=xpktools.replace_entry(returnLine,originAssCol+1,originAss)
-        returnLine=xpktools.replace_entry(returnLine,originPPMCol+1,aveOriginPPM)
+        returnLine = xpktools.replace_entry(returnLine, originAssCol + 1, originAss)
+        returnLine = xpktools.replace_entry(returnLine, originPPMCol + 1, aveOriginPPM)

    return returnLine

@@ -54,9 +53,9 @@ def predictNOE(peaklist,originNuc,detectedNuc,originResNum,toResNum):
def _data_map(labelline):
    # Generate a map between datalabels and column number
    #   based on a labelline
-    i=0 # A counter
-    datamap={} # The data map dictionary
-    labelList=labelline.split() # Get the label line
+    i = 0  # A counter
+    datamap = {}  # The data map dictionary
+    labelList = labelline.split()  # Get the label line

    # Get the column number for each label
    for i in range(len(labelList)):
@@ -65,11 +64,11 @@ def _data_map(labelline):
    return datamap


-def _col_ave(list,col):
+def _col_ave(list, col):
    # Compute average values from a particular column in a string list
-    total=0
-    n=0
+    total = 0
+    n = 0
    for element in list:
        total += float(element.split()[col])
        n += 1
-    return total/n
+    return total / n
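The commented example near the top of predictNOE ("predictNOE(peaklist,'N15','H1',10,12)") expands to roughly the following (a sketch under the module's own assumptions: noesy.xpk is a placeholder filename, and per the comments above, the peaklist should contain only diagonal/self-peak assignments):

from Bio.NMR import xpktools
from Bio.NMR import NOEtools

peaklist = xpktools.Peaklist("noesy.xpk")
# Predict where the residue 10 -> residue 12 NOE cross peak should appear,
# using the N15 (origin) and H1 (detected) self-peak assignments.
line = NOEtools.predictNOE(peaklist, "N15", "H1", 10, 12)
print line  # an .xpk data line with the origin columns replaced, or "" on failure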
Bio/NMR/xpktools.py (168 changes)

@@ -11,7 +11,7 @@
import sys

# * * * * * INITIALIZATIONS * * * * *
-HEADERLEN=6
+HEADERLEN = 6
# * * * * * _______________ * * * * *


@@ -24,19 +24,19 @@ class XpkEntry(object):
    #   self.field["H1.P"] will return the H1.P entry for example.
    #   self.field["entrynum"] returns the line number (1st field of line)

-    def __init__(self,entry,headline):
-        self.fields={}   # Holds all fields from input line in a dictionary
-                         # keys are data labels from the .xpk header
+    def __init__(self, entry, headline):
+        self.fields = {}  # Holds all fields from input line in a dictionary
+                          # keys are data labels from the .xpk header
        datlist = entry.split()
        headlist = headline.split()

-        i=0
-        for i in range(len(datlist)-1):
-            self.fields[headlist[i]]=datlist[i+1]
-        i=i+1
+        i = 0
+        for i in range(len(datlist) - 1):
+            self.fields[headlist[i]] = datlist[i+1]
+        i = i + 1

        try:
-            self.fields["entrynum"]=datlist[0]
+            self.fields["entrynum"] = datlist[0]
        except IndexError, e:
            pass

@@ -45,68 +45,68 @@ class Peaklist(object):
    # This class reads in an entire xpk file and returns
    # Header file lines are available as attributes
    # The data lines are available as a list
-    def __init__(self,infn):
+    def __init__(self, infn):

-        self.data=[]    # init the data line list
+        self.data = []    # init the data line list

-        infile=open(infn,'r')
+        infile = open(infn, 'r')

        # Read in the header lines
-        self.firstline=infile.readline().split("\012")[0]
-        self.axislabels=infile.readline().split("\012")[0]
-        self.dataset=infile.readline().split("\012")[0]
-        self.sw=infile.readline().split("\012")[0]
-        self.sf=infile.readline().split("\012")[0]
-        self.datalabels=infile.readline().split("\012")[0]
+        self.firstline = infile.readline().split("\012")[0]
+        self.axislabels = infile.readline().split("\012")[0]
+        self.dataset = infile.readline().split("\012")[0]
+        self.sw = infile.readline().split("\012")[0]
+        self.sf = infile.readline().split("\012")[0]
+        self.datalabels = infile.readline().split("\012")[0]

        # Read in the data lines to a list
-        line=infile.readline()
+        line = infile.readline()
        while line:
            self.data.append(line.split("\012")[0])
-            line=infile.readline()
+            line = infile.readline()

-    def residue_dict(self,index):
+    def residue_dict(self, index):
        # Generate a dictionary idexed by residue number or a nucleus
        # The nucleus should be given as the input argument in the
        # same form as it appears in the xpk label line (H1, 15N for example)

-        maxres=-1
-        minres=-1
+        maxres = -1
+        minres = -1

        # Cast the data lines into the xpentry class
-        self.dict={}
+        self.dict = {}
        for i in range(len(self.data)):
-            line=self.data[i]
-            ind=XpkEntry(line,self.datalabels).fields[index+".L"]
-            key=ind.split(".")[0]
+            line = self.data[i]
+            ind = XpkEntry(line, self.datalabels).fields[index + ".L"]
+            key = ind.split(".")[0]

-            res=int(key)
+            res = int(key)

-            if (maxres==-1):
-                maxres=res
-            if (minres==-1):
-                minres=res
+            if (maxres == -1):
+                maxres = res
+            if (minres == -1):
+                minres = res

-            maxres=max([maxres,res])
-            minres=min([minres,res])
+            maxres = max([maxres, res])
+            minres = min([minres, res])

            if str(res) in self.dict:
                # Append additional data to list under same key
-                templst=self.dict[str(res)]
+                templst = self.dict[str(res)]
                templst.append(line)
-                self.dict[str(res)]=templst
+                self.dict[str(res)] = templst

            else:
                # This is a new residue, start a new list
-                self.dict[str(res)]=[line]  # Use [] for list type
+                self.dict[str(res)] = [line]  # Use [] for list type

-        self.dict["maxres"]=maxres
-        self.dict["minres"]=minres
+        self.dict["maxres"] = maxres
+        self.dict["minres"] = minres

        return self.dict

-    def write_header(self,outfn):
-        outfile=_try_open_write(outfn)
+    def write_header(self, outfn):
+        outfile = _try_open_write(outfn)
        outfile.write(self.firstline)
        outfile.write("\012")
        outfile.write(self.axislabels)
@@ -125,7 +125,7 @@ def write_header(self,outfn):
def _try_open_read(fn):
    # Try to open a file for reading.  Exit on IOError
    try:
-        infile=open(fn,'r')
+        infile = open(fn, 'r')
    except IOError, e:
        print "file", fn, "could not be opened for reading - quitting."
        sys.exit(0)
@@ -135,62 +135,62 @@ def _try_open_read(fn):
def _try_open_write(fn):
    # Try to open a file for writing.  Exit on IOError
    try:
-        infile=open(fn,'w')
+        infile = open(fn, 'w')
    except IOError, e:
        print "file", fn, "could not be opened for writing - quitting."
        sys.exit(0)
    return infile


-def replace_entry(line,fieldn,newentry):
+def replace_entry(line, fieldn, newentry):
    # Replace an entry in a string by the field number
    # No padding is implemented currently.  Spacing will change if
    #  the original field entry and the new field entry are of
    #  different lengths.
    # This method depends on xpktools._find_start_entry

-    start=_find_start_entry(line,fieldn)
-    leng=len(line[start:].split()[0])
-    newline=line[:start]+str(newentry)+line[(start+leng):]
+    start = _find_start_entry(line, fieldn)
+    leng = len(line[start:].split()[0])
+    newline = line[:start] + str(newentry) + line[(start+leng):]
    return newline


-def _find_start_entry(line,n):
+def _find_start_entry(line, n):
    # find the starting point character for the n'th entry in
    # a space delimited line.  n is counted starting with 1
    # The n=1 field by definition begins at the first character
    # This function is used by replace_entry

-    infield=0       # A flag that indicates that the counter is in a field
+    infield = 0       # A flag that indicates that the counter is in a field

-    if (n==1):
+    if (n == 1):
        return 0        # Special case

    # Count the number of fields by counting spaces
-    c=1
-    leng=len(line)
+    c = 1
+    leng = len(line)

    # Initialize variables according to whether the first character
    #  is a space or a character
-    if (line[0]==" "):
-        infield=0
-        field=0
+    if (line[0] == " "):
+        infield = 0
+        field = 0
    else:
-        infield=1
-        field=1
+        infield = 1
+        field = 1

-    while (c<leng and field<n):
+    while (c < leng and field < n):
        if (infield):
-            if (line[c]==" " and not (line[c-1]==" ")):
-                infield=0
+            if (line[c] == " " and not (line[c-1] == " ")):
+                infield = 0
        else:
-                if (not line[c]==" "):
-                    infield=1
-                    field=field+1
+                if (not line[c] == " "):
+                    infield = 1
+                    field = field + 1

-        c=c+1
+        c = c + 1

-    return c-1
+    return c - 1


def data_table(fn_list, datalabel, keyatom):
@@ -199,13 +199,13 @@ def data_table(fn_list, datalabel, keyatom):
    # <datalabel> and the index for the data table is by the
    # nucleus indicated by <keyatom>.

-    outlist=[]
+    outlist = []

-    [dict_list,label_line_list]=_read_dicts(fn_list,keyatom)
+    [dict_list, label_line_list] = _read_dicts(fn_list, keyatom)

    # Find global max and min residue numbers
-    minr=dict_list[0]["minres"]
-    maxr=dict_list[0]["maxres"]
+    minr = dict_list[0]["minres"]
+    maxr = dict_list[0]["maxres"]

    for dictionary in dict_list:
        if (maxr < dictionary["maxres"]):
@@ -213,37 +213,37 @@ def data_table(fn_list, datalabel, keyatom):
        if (minr > dictionary["minres"]):
            minr = dictionary["minres"]

-    res=minr
+    res = minr
    while res <= maxr:        # s.t. res numbers
-        count=0
-        line=str(res)
+        count = 0
+        line = str(res)
        for dictionary in dict_list:      # s.t. dictionaries
-            label=label_line_list[count]
+            label = label_line_list[count]
            if str(res) in dictionary:
-                line=line+"\t"+XpkEntry(dictionary[str(res)][0],label).fields[datalabel]
+                line = line + "\t" + XpkEntry(dictionary[str(res)][0], label).fields[datalabel]
            else:
-                line=line+"\t"+"*"
-            count=count+1
-        line=line+"\n"
+                line = line + "\t" + "*"
+            count = count + 1
+        line = line + "\n"
        outlist.append(line)
-        res=res+1
+        res = res + 1

    return outlist


def _sort_keys(dictionary):
-    keys=dictionary.keys()
-    sorted_keys=keys.sort()
+    keys = dictionary.keys()
+    sorted_keys = keys.sort()
    return sorted_keys


def _read_dicts(fn_list, keyatom):
    # Read multiple files into a list of residue dictionaries
-    dict_list=[]
-    datalabel_list=[]
+    dict_list = []
+    datalabel_list = []
    for fn in fn_list:
-        peaklist=Peaklist(fn)
-        dict=peaklist.residue_dict(keyatom)
+        peaklist = Peaklist(fn)
+        dict = peaklist.residue_dict(keyatom)
        dict_list.append(dict)
        datalabel_list.append(peaklist.datalabels)
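replace_entry and _find_start_entry together do 1-based field surgery on whitespace-delimited .xpk data lines. A small worked example (the data line is invented for illustration):

from Bio.NMR import xpktools

line = "8  3.hn  8.853  0.021  ++"
# Replace the 3rd whitespace-delimited field. As the comment in
# replace_entry warns, no padding is done: spacing shifts if the
# replacement has a different length than the original entry.
print xpktools.replace_entry(line, 3, "8.990")
# -> "8  3.hn  8.990  0.021  ++"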
 
Bio/NeuralNetwork/BackPropagation/Layer.py (4 changes)

@@ -146,7 +146,7 @@ def backpropagate(self, outputs, learning_rate, momentum):


class HiddenLayer(AbstractLayer):
-    def __init__(self, num_nodes, next_layer, activation = logistic_function):
+    def __init__(self, num_nodes, next_layer, activation=logistic_function):
        """Initialize a hidden layer.

        Arguments:
@@ -258,7 +258,7 @@ def backpropagate(self, outputs, learning_rate, momentum):


class OutputLayer(AbstractLayer):
-    def __init__(self, num_nodes, activation = logistic_function):
+    def __init__(self, num_nodes, activation=logistic_function):
        """Initialize the Output Layer.

        Arguments:

Bio/NeuralNetwork/Gene/Motif.py (2 changes)

@@ -18,7 +18,7 @@
class MotifFinder(object):
    """Find motifs in a set of Sequence Records.
    """
-    def __init__(self, alphabet_strict = 1):
+    def __init__(self, alphabet_strict=1):
        """Initialize a finder to get motifs.

        Arguments:

Bio/NeuralNetwork/Gene/Pattern.py (4 changes)

@@ -19,7 +19,7 @@ class PatternIO(object):
    This just defines a simple persistance class for patterns, making
    it easy to write them to a file and read 'em back.
    """
-    def __init__(self, alphabet = None):
+    def __init__(self, alphabet=None):
        """Intialize the reader and writer class.

        Arguments:
@@ -218,7 +218,7 @@ def get_differing(self, top_num, bottom_num):

        return all_patterns

-    def remove_polyA(self, at_percentage = .9):
+    def remove_polyA(self, at_percentage=.9):
        """Remove patterns which are likely due to polyA tails from the lists.

        This is just a helper function to remove pattenrs which are likely

Bio/NeuralNetwork/Gene/Schema.py (28 changes)

@@ -166,11 +166,11 @@ class SchemaDNAAlphabet(Alphabet.Alphabet):
    """
    letters = ["G", "A", "T", "C", "*"]

-    alphabet_matches = {"G" : "G",
-                        "A" : "A",
-                        "T" : "T",
-                        "C" : "C",
-                        "*" : "GATC"}
+    alphabet_matches = {"G": "G",
+                        "A": "A",
+                        "T": "T",
+                        "C": "C",
+                        "*": "GATC"}

# -- GA schema finder

@@ -186,7 +186,7 @@ class GeneticAlgorithmFinder(object):
    can be overridden easily by creating a GeneticAlgorithmFinder
    with a different alphabet.
    """
-    def __init__(self, alphabet = SchemaDNAAlphabet()):
+    def __init__(self, alphabet=SchemaDNAAlphabet()):
        """Initialize a finder to get schemas using Genetic Algorithms.

        Arguments:
@@ -215,8 +215,8 @@ def _set_up_genetic_algorithm(self):
        """
        self.motif_generator = RandomMotifGenerator(self.alphabet)

-        self.mutator = SinglePositionMutation(mutation_rate = 0.1)
-        self.crossover = SinglePointCrossover(crossover_prob = 0.25)
+        self.mutator = SinglePositionMutation(mutation_rate=0.1)
+        self.crossover = SinglePointCrossover(crossover_prob=0.25)
        self.repair = AmbiguousRepair(Schema(self.alphabet.alphabet_matches),
                                      4)
        self.base_selector = TournamentSelection(self.mutator, self.crossover,
@@ -370,7 +370,7 @@ def calculate_fitness(self, genome):
class RandomMotifGenerator(object):
    """Generate a random motif within given parameters.
    """
-    def __init__(self, alphabet, min_size = 12, max_size = 17):
+    def __init__(self, alphabet, min_size=12, max_size=17):
        """Initialize with the motif parameters.

        Arguments:
@@ -408,7 +408,7 @@ class SimpleFinisher(object):
    GA has proceeded for a specified number of generations and has
    a given number of unique schema with positive fitness.
    """
-    def __init__(self, num_schemas, min_generations = 100):
+    def __init__(self, num_schemas, min_generations=100):
        """Initialize the finisher with its parameters.

        Arguments:
@@ -456,8 +456,8 @@ class SchemaFinder(object):
    in a set of DNA sequences, but the finder can be customized to deal
    with any type of data.
    """
-    def __init__(self, num_schemas = 100,
-                 schema_finder = GeneticAlgorithmFinder()):
+    def __init__(self, num_schemas=100,
+                 schema_finder=GeneticAlgorithmFinder()):
        self.num_schemas = num_schemas
        self._finder = schema_finder

@@ -536,7 +536,7 @@ def representation(self, sequence):
        return schema_counts


-def matches_schema(pattern, schema, ambiguity_character = '*'):
+def matches_schema(pattern, schema, ambiguity_character='*'):
    """Determine whether or not the given pattern matches the schema.

    Arguments:
@@ -567,7 +567,7 @@ def matches_schema(pattern, schema, ambiguity_character = '*'):
class SchemaFactory(object):
    """Generate Schema from inputs of Motifs or Signatures.
    """
-    def __init__(self, ambiguity_symbol = '*'):
+    def __init__(self, ambiguity_symbol='*'):
        """Initialize the SchemaFactory

        Arguments:
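matches_schema, whose default argument is reformatted above, compares two equal-length strings position by position, treating the ambiguity character as a wildcard. A quick sketch of the intended behavior (my reading of the function, which is not shown in full here):

from Bio.NeuralNetwork.Gene.Schema import matches_schema

print matches_schema("GATC", "GA*C")  # match: '*' covers any letter
print matches_schema("GTTC", "GA*C")  # no match: 'T' vs 'A' at position 1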
Bio/NeuralNetwork/Gene/Signature.py (2 changes)

@@ -21,7 +21,7 @@ class SignatureFinder(object):
    two motifs separated by a gap. We need something a lot smarter than
    this to find more complicated signatures.
    """
-    def __init__(self, alphabet_strict = 1):
+    def __init__(self, alphabet_strict=1):
        """Initialize a finder to get signatures.

        Arguments:

Bio/NeuralNetwork/StopTraining.py (4 changes)

@@ -24,8 +24,8 @@ class ValidationIncreaseStop(object):
    set. This stopping criterion function will stop when the validation
    error increases.
    """
-    def __init__(self, max_iterations = None, min_iterations = 0,
-                 verbose = 0):
+    def __init__(self, max_iterations=None, min_iterations=0,
+                 verbose=0):
        """Initialize the stopping criterion class.

        Arguments:

Bio/NeuralNetwork/Training.py (6 changes)

@@ -9,7 +9,7 @@ class TrainingExample(object):

    XXX Do I really need this?
    """
-    def __init__(self, inputs, outputs, name = ""):
+    def __init__(self, inputs, outputs, name=""):
        self.name = name
        self.inputs = inputs
        self.outputs = outputs
@@ -34,7 +34,7 @@ class ExampleManager(object):
    provide a completely independent method of testing how well a network
    performs.
    """
-    def __init__(self, training_percent = .4, validation_percent = .4):
+    def __init__(self, training_percent=.4, validation_percent=.4):
        """Initialize the manager with the training examples.

        Arguments:
@@ -56,7 +56,7 @@ def __init__(self, training_percent = .4, validation_percent = .4):
        o test_examples - Examples for training purposes.
        """
        assert training_percent + validation_percent <= 1.0, \
-               "Training and validation percentages more than 100 percent"
+            "Training and validation percentages more than 100 percent"

        self.train_examples = []
        self.validation_examples = []
