Commit 20da7eb: update docs

amaiya committed Mar 9, 2022
1 parent a14b30f

Showing 13 changed files with 911 additions and 437 deletions.
24 changes: 21 additions & 3 deletions docs/core.html

@@ -627,7 +627,7 @@ <h1 class="title">Module <code>ktrain.core</code></h1>
             U.vprint('done.', verbose=verbose)
             if show_plot:
                 U.vprint('Visually inspect loss plot and select learning rate associated with falling loss', verbose=verbose)
-                self.lr_plot()
+                self.lr_plot(suggest=suggest)
             else:
                 U.vprint('Please invoke the Learner.lr_plot() method to visually inspect '
                          'the loss plot to help identify the maximal learning rate '
@@ -1683,6 +1683,24 @@ <h1 class="title">Module <code>ktrain.core</code></h1>
             from .text.ner.anago.layers import CRF
             from .text.ner import crf_loss
             custom_objects={'CRF': CRF, 'crf_loss':crf_loss}
+            # save the old te_model as a backup
+            if preproc:
+                old_te_model = preproc.p.te_model
+                # load the TransformerEmbedding model from the fpath/hf folder
+                # if model_name is a local path, update it to reflect the current predictor folder, since
+                # all model/tokenizer/config files should have been saved there by predictor.save
+
+                preproc.p.te_model = os.path.join(fpath, 'hf') if preproc.p.te_model else preproc.p.te_model
+                if preproc.p.te_model:
+                    # te_model should point to the fpath/hf folder
+                    try:
+                        preproc.p.activate_transformer(preproc.p.te_model, layers=preproc.p.te_layers)
+                    except Exception:
+                        # fall back to the old model id or location on error, for backwards compatibility
+                        warnings.warn(f'could not load TransformerEmbedding model from {preproc.p.te_model} - trying {old_te_model}')
+                        preproc.p.te_model = old_te_model
+                        preproc.p.activate_transformer(preproc.p.te_model, layers=preproc.p.te_layers)
+
         elif (preproc and (isinstance(preproc, NodePreprocessor) or \
             type(preproc).__name__ == 'NodePreprocessor')) or \
             train_data and U.is_nodeclass(data=train_data):
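The added block above is a backup-and-fallback pattern: remember the previous TransformerEmbedding location, prefer the copy saved under the predictor's fpath/hf folder, and restore the original model id if that load fails. A minimal standalone sketch of the same pattern, with a hypothetical load_embedding callable standing in for preproc.p.activate_transformer:

    import os
    import warnings

    def load_with_fallback(load_embedding, predictor_dir, old_model_id):
        """Prefer the model bundled with the predictor; fall back to the original id."""
        bundled = os.path.join(predictor_dir, 'hf')   # where predictor.save put the files
        try:
            return load_embedding(bundled)
        except Exception as e:
            # backwards compatibility: reload from the original model id/location
            warnings.warn(f'could not load from {bundled} ({e}) - trying {old_model_id}')
            return load_embedding(old_model_id)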
@@ -3460,7 +3478,7 @@ <h3>Inherited members</h3>
             U.vprint('done.', verbose=verbose)
             if show_plot:
                 U.vprint('Visually inspect loss plot and select learning rate associated with falling loss', verbose=verbose)
-                self.lr_plot()
+                self.lr_plot(suggest=suggest)
             else:
                 U.vprint('Please invoke the Learner.lr_plot() method to visually inspect '
                          'the loss plot to help identify the maximal learning rate '
@@ -4697,7 +4715,7 @@ <h3>Methods</h3>
             U.vprint('done.', verbose=verbose)
             if show_plot:
                 U.vprint('Visually inspect loss plot and select learning rate associated with falling loss', verbose=verbose)
-                self.lr_plot()
+                self.lr_plot(suggest=suggest)
             else:
                 U.vprint('Please invoke the Learner.lr_plot() method to visually inspect '
                          'the loss plot to help identify the maximal learning rate '
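The three identical one-line changes in this file forward lr_find's suggest flag to the lr_plot call, so numeric LR suggestions print whenever the plot is shown. A hedged usage sketch (assumes model and train_data were prepared elsewhere with ktrain's data-loading helpers):

    import ktrain

    learner = ktrain.get_learner(model, train_data=train_data, batch_size=32)
    # with this commit, suggest=True is forwarded to lr_plot when show_plot=True
    learner.lr_find(show_plot=True, suggest=True)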
141 changes: 124 additions & 17 deletions docs/lroptimize/lrfinder.html

@@ -199,18 +199,46 @@ <h1 class="title">Module <code>ktrain.lroptimize.lrfinder</code></h1>
             print("Failed to compute the gradients, there might not be enough points.\n" +\
                   "Plot displayed without suggestion.")
         else:
+            valley = self.valley(self.lrs, self.losses)
             mg = self.mg
             ml = self.ml
-            print('Two possible suggestions for LR from plot:')
-            print(f"\tMin numerical gradient: {self.lrs[mg]:.2E}")
-            print(f"\tMin loss divided by 10: {self.lrs[ml]/10:.2E}")
-            ax.plot(self.lrs[mg],self.losses[mg], markersize=10,marker='o',color='red')
+            print('Three possible suggestions for LR from plot:')
+            print(f"\tLongest valley (red): {self.lrs[valley]:.2E}")
+            print(f"\tMin numerical gradient (purple): {self.lrs[mg]:.2E}")
+            print(f"\tMin loss divided by 10 (omitted from plot): {self.lrs[ml]/10:.2E}")
+            ax.plot(self.lrs[valley],self.losses[valley], markersize=10,marker='o',color='red')
+            ax.plot(self.lrs[mg],self.losses[mg], markersize=10,marker='o',color='purple')
         fig = plt.gcf()
         plt.show()
         if return_fig: return fig
         return


+    def valley(self, lrs, losses):
+        """
+        valley method for LR suggestions:
+        https://github.com/fastai/fastai/pull/3377
+        """
+        n = len(losses)
+        max_start, max_end = 0, 0
+
+        # find the longest valley
+        lds = [1]*n
+        for i in range(1, n):
+            for j in range(0, i):
+                if (losses[i] < losses[j]) and (lds[i] < lds[j] + 1):
+                    lds[i] = lds[j] + 1
+                if lds[max_end] < lds[i]:
+                    max_end = i
+                    max_start = max_end - lds[max_end]
+
+        sections = (max_end - max_start) / 3
+        idx = max_start + int(sections) + int(sections/2)
+
+        #return lrs[idx], (lrs[idx], losses[idx])
+        return idx
+
+
     def _compute_stats(self):
         """
         ```
@@ -223,8 +251,9 @@ <h1 class="title">Module <code>ktrain.lroptimize.lrfinder</code></h1>
         self.ml = np.argmin(self.losses)
         try:
             self.mg = (np.gradient(np.array(self.losses[32:self.ml]))).argmin()
-        except:
+        except Exception as e:
             self.mg = None
+            warnings.warn(str(e))
         return


@@ -234,6 +263,7 @@ <h1 class="title">Module <code>ktrain.lroptimize.lrfinder</code></h1>
         Generates three numerical estimates of lr:
         1. lr associated with minimum numerical gradient (None if gradient computation fails)
         2. lr associated with minimum loss divided by 10
+        3. lr associated with longest valley
         Returns:
             tuple: (float, float, float)

@@ -246,7 +276,9 @@ <h1 class="title">Module <code>ktrain.lroptimize.lrfinder</code></h1>
         if self.mg is not None:
             lr1 = self.lrs[self.mg]
         lr2 = self.lrs[self.ml]/10
-        return (lr1, lr2)
+        lr3 = self.lrs[self.valley(self.lrs, self.losses)]
+
+        return (lr1, lr2, lr3)


def find_called(self):
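The new valley heuristic is a longest-decreasing-subsequence search: lds[i] tracks the longest chain of strictly falling losses ending at index i, the longest such chain is taken as the valley, and the suggested LR sits roughly halfway down it. A self-contained rerun of that computation on made-up data:

    # demo of the longest-valley index computation (losses/lrs are made up)
    losses = [1.0, 0.98, 0.9, 0.7, 0.5, 0.35, 0.3, 0.31, 0.6, 1.2]
    lrs = [1e-7 * (10 ** (i / 2)) for i in range(len(losses))]

    n = len(losses)
    max_start, max_end = 0, 0
    lds = [1] * n                       # lds[i]: longest descent ending at i
    for i in range(1, n):
        for j in range(0, i):
            if losses[i] < losses[j] and lds[i] < lds[j] + 1:
                lds[i] = lds[j] + 1
            if lds[max_end] < lds[i]:
                max_end = i
                max_start = max_end - lds[max_end]

    sections = (max_end - max_start) / 3
    idx = max_start + int(sections) + int(sections / 2)  # a point partway down the valley
    print(f"suggested LR: {lrs[idx]:.2E}")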
@@ -470,18 +502,46 @@ <h2 class="section-title" id="header-classes">Classes</h2>
             print("Failed to compute the gradients, there might not be enough points.\n" +\
                   "Plot displayed without suggestion.")
         else:
+            valley = self.valley(self.lrs, self.losses)
             mg = self.mg
             ml = self.ml
-            print('Two possible suggestions for LR from plot:')
-            print(f"\tMin numerical gradient: {self.lrs[mg]:.2E}")
-            print(f"\tMin loss divided by 10: {self.lrs[ml]/10:.2E}")
-            ax.plot(self.lrs[mg],self.losses[mg], markersize=10,marker='o',color='red')
+            print('Three possible suggestions for LR from plot:')
+            print(f"\tLongest valley (red): {self.lrs[valley]:.2E}")
+            print(f"\tMin numerical gradient (purple): {self.lrs[mg]:.2E}")
+            print(f"\tMin loss divided by 10 (omitted from plot): {self.lrs[ml]/10:.2E}")
+            ax.plot(self.lrs[valley],self.losses[valley], markersize=10,marker='o',color='red')
+            ax.plot(self.lrs[mg],self.losses[mg], markersize=10,marker='o',color='purple')
         fig = plt.gcf()
         plt.show()
         if return_fig: return fig
         return


+    def valley(self, lrs, losses):
+        """
+        valley method for LR suggestions:
+        https://github.com/fastai/fastai/pull/3377
+        """
+        n = len(losses)
+        max_start, max_end = 0, 0
+
+        # find the longest valley
+        lds = [1]*n
+        for i in range(1, n):
+            for j in range(0, i):
+                if (losses[i] < losses[j]) and (lds[i] < lds[j] + 1):
+                    lds[i] = lds[j] + 1
+                if lds[max_end] < lds[i]:
+                    max_end = i
+                    max_start = max_end - lds[max_end]
+
+        sections = (max_end - max_start) / 3
+        idx = max_start + int(sections) + int(sections/2)
+
+        #return lrs[idx], (lrs[idx], losses[idx])
+        return idx
+
+
     def _compute_stats(self):
         """
         ```
@@ -494,8 +554,9 @@ <h2 class="section-title" id="header-classes">Classes</h2>
         self.ml = np.argmin(self.losses)
         try:
             self.mg = (np.gradient(np.array(self.losses[32:self.ml]))).argmin()
-        except:
+        except Exception as e:
             self.mg = None
+            warnings.warn(str(e))
         return


@@ -505,6 +566,7 @@ <h2 class="section-title" id="header-classes">Classes</h2>
         Generates three numerical estimates of lr:
         1. lr associated with minimum numerical gradient (None if gradient computation fails)
         2. lr associated with minimum loss divided by 10
+        3. lr associated with longest valley
         Returns:
             tuple: (float, float, float)

@@ -517,7 +579,9 @@ <h2 class="section-title" id="header-classes">Classes</h2>
         if self.mg is not None:
             lr1 = self.lrs[self.mg]
         lr2 = self.lrs[self.ml]/10
-        return (lr1, lr2)
+        lr3 = self.lrs[self.valley(self.lrs, self.losses)]
+
+        return (lr1, lr2, lr3)


def find_called(self):
@@ -557,6 +621,7 @@ <h3>Methods</h3>
<div class="desc"><pre><code>Generates two numerical estimates of lr:
1. lr associated with minum numerical gradient (None if gradient computation fails)
2. lr associated with minimum loss divided by 10
3. lr associated with longest valley
Args:
tuple: (float, float)

@@ -572,6 +637,7 @@ <h3>Methods</h3>
         Generates three numerical estimates of lr:
         1. lr associated with minimum numerical gradient (None if gradient computation fails)
         2. lr associated with minimum loss divided by 10
+        3. lr associated with longest valley
         Returns:
             tuple: (float, float, float)

@@ -584,7 +650,9 @@ <h3>Methods</h3>
         if self.mg is not None:
             lr1 = self.lrs[self.mg]
         lr2 = self.lrs[self.ml]/10
-        return (lr1, lr2)</code></pre>
+        lr3 = self.lrs[self.valley(self.lrs, self.losses)]
+
+        return (lr1, lr2, lr3)</code></pre>
</details>
</dd>
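Since estimates now returns a 3-tuple, callers that unpacked two values need updating. A hedged usage sketch, assuming Learner.lr_estimate() forwards LRFinder's estimates after lr_find has run:

    # previously a 2-tuple; the valley estimate is appended by this commit
    learner.lr_find(show_plot=False)
    mg_lr, ml_lr, valley_lr = learner.lr_estimate()
    print(f"min-gradient: {mg_lr}, min-loss/10: {ml_lr}, valley: {valley_lr}")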
<dt id="ktrain.lroptimize.lrfinder.LRFinder.find"><code class="name flex">
@@ -782,12 +850,15 @@ <h3>Methods</h3>
             print("Failed to compute the gradients, there might not be enough points.\n" +\
                   "Plot displayed without suggestion.")
         else:
+            valley = self.valley(self.lrs, self.losses)
             mg = self.mg
             ml = self.ml
-            print('Two possible suggestions for LR from plot:')
-            print(f"\tMin numerical gradient: {self.lrs[mg]:.2E}")
-            print(f"\tMin loss divided by 10: {self.lrs[ml]/10:.2E}")
-            ax.plot(self.lrs[mg],self.losses[mg], markersize=10,marker='o',color='red')
+            print('Three possible suggestions for LR from plot:')
+            print(f"\tLongest valley (red): {self.lrs[valley]:.2E}")
+            print(f"\tMin numerical gradient (purple): {self.lrs[mg]:.2E}")
+            print(f"\tMin loss divided by 10 (omitted from plot): {self.lrs[ml]/10:.2E}")
+            ax.plot(self.lrs[valley],self.losses[valley], markersize=10,marker='o',color='red')
+            ax.plot(self.lrs[mg],self.losses[mg], markersize=10,marker='o',color='purple')
         fig = plt.gcf()
         plt.show()
         if return_fig: return fig
@@ -833,6 +904,41 @@ <h3>Methods</h3>
     plt.ylim(y_lim)</code></pre>
 </details>
 </dd>
+<dt id="ktrain.lroptimize.lrfinder.LRFinder.valley"><code class="name flex">
+<span>def <span class="ident">valley</span></span>(<span>self, lrs, losses)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>valley method for LR suggestions:
+<a href="https://github.com/fastai/fastai/pull/3377">https://github.com/fastai/fastai/pull/3377</a></p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def valley(self, lrs, losses):
+    """
+    valley method for LR suggestions:
+    https://github.com/fastai/fastai/pull/3377
+    """
+    n = len(losses)
+    max_start, max_end = 0, 0
+
+    # find the longest valley
+    lds = [1]*n
+    for i in range(1, n):
+        for j in range(0, i):
+            if (losses[i] < losses[j]) and (lds[i] < lds[j] + 1):
+                lds[i] = lds[j] + 1
+            if lds[max_end] < lds[i]:
+                max_end = i
+                max_start = max_end - lds[max_end]
+
+    sections = (max_end - max_start) / 3
+    idx = max_start + int(sections) + int(sections/2)
+
+    #return lrs[idx], (lrs[idx], losses[idx])
+    return idx</code></pre>
+</details>
+</dd>
 </dl>
 </dd>
 </dl>
@@ -860,6 +966,7 @@ <h4><code><a title="ktrain.lroptimize.lrfinder.LRFinder" href="#ktrain.lroptimiz
 <li><code><a title="ktrain.lroptimize.lrfinder.LRFinder.on_batch_end" href="#ktrain.lroptimize.lrfinder.LRFinder.on_batch_end">on_batch_end</a></code></li>
 <li><code><a title="ktrain.lroptimize.lrfinder.LRFinder.plot_loss" href="#ktrain.lroptimize.lrfinder.LRFinder.plot_loss">plot_loss</a></code></li>
 <li><code><a title="ktrain.lroptimize.lrfinder.LRFinder.plot_loss_change" href="#ktrain.lroptimize.lrfinder.LRFinder.plot_loss_change">plot_loss_change</a></code></li>
+<li><code><a title="ktrain.lroptimize.lrfinder.LRFinder.valley" href="#ktrain.lroptimize.lrfinder.LRFinder.valley">valley</a></code></li>
 </ul>
 </li>
 </ul>