From df687da06d9550236b67ca7c13071a0950aa99f8 Mon Sep 17 00:00:00 2001
From: seanmor5
Date: Fri, 10 May 2024 17:46:36 +0000
Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20elixir-n?=
=?UTF-8?q?x/axon@1ccbeba57395712e47d61b3912956fb2a09f58a8=20=F0=9F=9A=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.build | 5 +-
404.html | 2 +-
Axon.Activations.html | 642 ++++++++++-----------
Axon.CompileError.html | 2 +-
Axon.Display.html | 6 +-
Axon.Initializers.html | 352 ++++++------
Axon.Layers.html | 558 +++++++++---------
Axon.Loop.State.html | 22 +-
Axon.Loop.html | 302 +++++-----
Axon.LossScale.html | 4 +-
Axon.Losses.html | 508 ++++++++---------
Axon.Metrics.html | 136 ++---
Axon.MixedPrecision.html | 82 +--
Axon.ModelState.html | 556 ++++++++++++++++++
Axon.None.html | 2 +-
Axon.StatefulOutput.html | 2 +-
Axon.epub | Bin 419383 -> 418394 bytes
Axon.html | 783 +++++++++++---------------
accelerating_axon.html | 140 ++---
api-reference.html | 11 +-
complex_models.html | 86 +--
credit_card_fraud.html | 180 +++---
custom_layers.html | 116 ++--
custom_models_loss_optimizers.html | 560 +++++++++---------
dist/search_data-7F2B0842.js | 1 -
dist/search_data-BDD49AAD.js | 1 +
dist/sidebar_items-B66D7C0E.js | 1 -
dist/sidebar_items-D4AB84D3.js | 1 +
fashionmnist_autoencoder.html | 112 ++--
fashionmnist_vae.html | 522 ++++++++---------
guides.html | 2 +-
horses_or_humans.html | 184 +++---
instrumenting_loops_with_metrics.html | 386 ++++++-------
lstm_generation.html | 158 +++---
mnist.html | 54 +-
mnist_autoencoder_using_kino.html | 198 +++----
model_hooks.html | 556 +++++++++---------
multi_input_multi_output_models.html | 200 +++----
onnx_to_axon.html | 122 ++--
search.html | 4 +-
sequential_models.html | 164 +++---
training_and_inference_mode.html | 152 ++---
using_loop_event_handlers.html | 396 ++++++-------
writing_custom_event_handlers.html | 436 +++++++-------
writing_custom_metrics.html | 586 +++++++++----------
xor.html | 78 +--
your_first_axon_model.html | 32 +-
your_first_evaluation_loop.html | 214 +++----
your_first_training_loop.html | 372 ++++++------
49 files changed, 5203 insertions(+), 4786 deletions(-)
create mode 100644 Axon.ModelState.html
delete mode 100644 dist/search_data-7F2B0842.js
create mode 100644 dist/search_data-BDD49AAD.js
delete mode 100644 dist/sidebar_items-B66D7C0E.js
create mode 100644 dist/sidebar_items-D4AB84D3.js
diff --git a/.build b/.build
index 9f147e29..a84c0c28 100644
--- a/.build
+++ b/.build
@@ -34,6 +34,7 @@ Axon.LossScale.html
Axon.Losses.html
Axon.Metrics.html
Axon.MixedPrecision.html
+Axon.ModelState.html
Axon.None.html
Axon.StatefulOutput.html
Axon.html
@@ -71,8 +72,8 @@ dist/merriweather-latin-ext-300-normal-K6L27CZ5.woff2
dist/merriweather-vietnamese-300-italic-EHHNZPUO.woff2
dist/merriweather-vietnamese-300-normal-U376L4Z4.woff2
dist/remixicon-NKANDIL5.woff2
-dist/search_data-7F2B0842.js
-dist/sidebar_items-B66D7C0E.js
+dist/search_data-BDD49AAD.js
+dist/sidebar_items-D4AB84D3.js
fashionmnist_autoencoder.html
fashionmnist_vae.html
guides.html
diff --git a/404.html b/404.html
index 307a333b..b05e638f 100644
--- a/404.html
+++ b/404.html
@@ -16,7 +16,7 @@
-
+
diff --git a/Axon.Activations.html b/Axon.Activations.html
index 48b38d30..ea24bc8d 100644
--- a/Axon.Activations.html
+++ b/Axon.Activations.html
@@ -14,7 +14,7 @@
-
+
@@ -136,19 +136,19 @@
Activation functions.
Activation functions are element-wise, (typically) non-linear
functions called on the output of another layer, such as
a dense layer:
x
|> dense(weight, bias)
|> relu()
Activation functions output the "activation" or how active
a given layer's neurons are in learning a representation
of the data-generating distribution.
Some activations are commonly used as output activations. For
example softmax
is often used as the output in multiclass
classification problems because it returns a categorical
probability distribution:
iex> Axon.Activations.softmax(Nx.tensor([[1, 2, 3]], type: {:f, 32}))
#Nx.Tensor<
  f32[1][3]
  [
    [0.09003057330846786, 0.2447284758090973, 0.6652409434318542]
  ]
>
Other activations such as tanh or sigmoid are used because
they have desirable properties, such as keeping the output
tensor constrained within a certain range.
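To tie this overview to Axon's model-building API, here is a minimal, hypothetical classifier sketch (the 4-feature input, the hidden width of 8, and the 3 output classes are illustrative assumptions, not taken from this page) using relu on a hidden layer and softmax on the output:
# A minimal sketch; all layer sizes below are illustrative assumptions.
model =
  Axon.input("features", shape: {nil, 4})
  # relu keeps hidden activations non-negative
  |> Axon.dense(8, activation: :relu)
  |> Axon.dense(3)
  # softmax turns the final logits into a categorical probability distribution
  |> Axon.activation(:softmax)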
Generally, the choice of activation function is arbitrary,
although some activations work better than others in certain
@@ -442,26 +442,26 @@
celu(x, opts \\ [])
Examples
iex> Axon.Activations.celu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
#Nx.Tensor<
  f32[7]
  [-0.9502129554748535, -0.8646647334098816, -0.6321205496788025, 0.0, 1.0, 2.0, 3.0]
>

iex> Axon.Activations.celu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))
#Nx.Tensor<
  bf16[2][3]
  [
    [-0.62890625, -0.86328125, -0.94921875],
    [1.0, 2.0, 3.0]
  ]
>
Error cases
iex> Axon.Activations.celu(Nx.tensor([0.0, 1.0, 2.0], type: {:f, 32}), alpha: 0.0)
** (ArgumentError) :alpha must be non-zero in CELU activation
iex> Axon.Activations.elu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
#Nx.Tensor<
  f32[7]
  [-0.9502129554748535, -0.8646647334098816, -0.6321205496788025, 0.0, 1.0, 2.0, 3.0]
>

iex> Axon.Activations.elu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))
#Nx.Tensor<
  bf16[2][3]
  [
    [-0.62890625, -0.86328125, -0.94921875],
    [1.0, 2.0, 3.0]
  ]
>
@@ -555,20 +555,20 @@ exp(x)
Examples
iex> Axon.Activations.exp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [0.049787066876888275, 0.1353352814912796, 0.3678794503211975, 1.0, 2.7182817459106445, 7.389056205749512, 20.08553695678711]
>

iex> Axon.Activations.exp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [0.3671875, 0.134765625, 0.049560546875],
    [2.703125, 7.375, 20.0]
  ]
>
@@ -598,20 +598,20 @@ gelu(x)
Examples
iex> Axon.Activations.gelu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-0.0040496885776519775, -0.04550027847290039, -0.15865525603294373, 0.0, 0.8413447141647339, 1.9544997215270996, 2.995950222015381]
>

iex> Axon.Activations.gelu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-0.16015625, -0.046875, -0.005859375],
    [0.83984375, 1.953125, 2.984375]
  ]
>
@@ -647,20 +647,20 @@ hard_sigmoid(x, opts \\ [])
Examples
iex> Axon.Activations.hard_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [0.0, 0.0, 0.0, 0.20000000298023224, 0.4000000059604645, 0.6000000238418579, 0.800000011920929]
>

iex> Axon.Activations.hard_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [7.781982421875e-4, 0.0, 0.0],
    [0.3984375, 0.59765625, 0.796875]
  ]
>
@@ -694,20 +694,20 @@ hard_silu(x, opts \\ [])
Examples
iex> Axon.Activations.hard_silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-0.0, -0.0, -0.0, 0.0, 0.4000000059604645, 1.2000000476837158, 2.4000000953674316]
>

iex> Axon.Activations.hard_silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-7.781982421875e-4, -0.0, -0.0],
    [0.3984375, 1.1953125, 2.390625]
  ]
>
@@ -737,20 +737,20 @@ hard_tanh(x)
Examples
iex> Axon.Activations.hard_tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0]
>

iex> Axon.Activations.hard_tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-1.0, -1.0, -1.0],
    [1.0, 1.0, 1.0]
  ]
>
@@ -788,20 +788,20 @@ leaky_relu(x, opts \\ [])
Examples
iex> Axon.Activations.leaky_relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]), alpha: 0.5)
#Nx.Tensor<
  f32[data: 7]
  [-1.5, -1.0, -0.5, 0.0, 1.0, 2.0, 3.0]
>

iex> Axon.Activations.leaky_relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], names: [:batch, :data]), alpha: 0.5)
#Nx.Tensor<
  f32[batch: 2][data: 3]
  [
    [-0.5, -1.0, -1.5],
    [1.0, 2.0, 3.0]
  ]
>
@@ -831,20 +831,20 @@ linear(x)
Examples
iex> Axon.Activations.linear(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]
>

iex> Axon.Activations.linear(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-1.0, -2.0, -3.0],
    [1.0, 2.0, 3.0]
  ]
>
@@ -874,20 +874,20 @@ log_sigmoid(x)
Examples
iex> Axon.Activations.log_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-3.0485873222351074, -2.1269280910491943, -1.3132617473602295, -0.6931471824645996, -0.3132616877555847, -0.12692801654338837, -0.04858734831213951]
>

iex> Axon.Activations.log_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-1.3125, -2.125, -3.046875],
    [-0.3125, -0.1259765625, -0.04833984375]
  ]
>
@@ -919,20 +919,20 @@ log_softmax(x, opts \\ [])
Examples
iex> Axon.Activations.log_softmax(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-6.457762718200684, -5.457762718200684, -4.457762718200684, -3.4577627182006836, -2.4577627182006836, -1.4577628374099731, -0.45776283740997314]
>

iex> Axon.Activations.log_softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-0.404296875, -1.3984375, -2.390625],
    [-2.390625, -1.3984375, -0.404296875]
  ]
>
@@ -964,20 +964,20 @@ log_sumexp(x, opts \\ [])
Examples
iex> Axon.Activations.log_sumexp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 1]
  [3.4577627182006836]
>

iex> Axon.Activations.log_sumexp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 1]
  [
    [-0.59375],
    [3.390625]
  ]
>
@@ -1007,20 +1007,20 @@ mish(x)
Examples
iex> Axon.Activations.mish(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-0.14564745128154755, -0.2525014877319336, -0.30340147018432617, 0.0, 0.8650984168052673, 1.9439589977264404, 2.98653507232666]
>

iex> Axon.Activations.mish(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-0.30078125, -0.25, -0.1435546875],
    [0.86328125, 1.9375, 2.96875]
  ]
>
@@ -1050,20 +1050,20 @@ relu6(x)
Examples
iex> Axon.Activations.relu6(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
#Nx.Tensor<
  f32[7]
  [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
>

iex> Axon.Activations.relu6(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [0.0, 0.0, 0.0],
    [1.0, 2.0, 3.0]
  ]
>
@@ -1099,20 +1099,20 @@ relu(x)
Examples
iex> Axon.Activations.relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
>

iex> Axon.Activations.relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [0.0, 0.0, 0.0],
    [1.0, 2.0, 3.0]
  ]
>
@@ -1150,20 +1150,20 @@ selu(x, opts \\ [])
Examples
iex> Axon.Activations.selu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-1.670568823814392, -1.5201665163040161, -1.1113307476043701, 0.0, 1.0507010221481323, 2.1014020442962646, 3.1521029472351074]
>

iex> Axon.Activations.selu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-1.09375, -1.5078125, -1.6640625],
    [1.046875, 2.09375, 3.140625]
  ]
>
@@ -1202,20 +1202,20 @@ sigmoid(x)
Examples
iex> Axon.Activations.sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [0.04742587357759476, 0.11920291930437088, 0.2689414322376251, 0.5, 0.7310585975646973, 0.8807970881462097, 0.9525741338729858]
>

iex> Axon.Activations.sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [0.267578125, 0.119140625, 0.04736328125],
    [0.73046875, 0.87890625, 0.94921875]
  ]
>
@@ -1245,20 +1245,20 @@ silu(x)
Examples
iex> Axon.Activations.silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-0.14227762818336487, -0.23840583860874176, -0.2689414322376251, 0.0, 0.7310585975646973, 1.7615941762924194, 2.857722282409668]
>

iex> Axon.Activations.silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-0.267578125, -0.23828125, -0.1416015625],
    [0.73046875, 1.7578125, 2.84375]
  ]
>
@@ -1306,22 +1306,22 @@ softmax(x, opts \\ [])
Examples
iex> Axon.Activations.softmax(Nx.tensor([[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]], names: [:batch, :data]))
#Nx.Tensor<
  f32[batch: 1][data: 7]
  [
    [0.0015683004166930914, 0.004263082519173622, 0.011588259600102901, 0.03150015324354172, 0.08562629669904709, 0.23275642096996307, 0.6326975226402283]
  ]
>

iex> Axon.Activations.softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [0.6640625, 0.2431640625, 0.08935546875],
    [0.08935546875, 0.2431640625, 0.6640625]
  ]
>
@@ -1351,20 +1351,20 @@ softplus(x)
Examples
iex> Axon.Activations.softplus(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [0.04858734831213951, 0.12692801654338837, 0.3132616877555847, 0.6931471824645996, 1.3132617473602295, 2.1269280910491943, 3.0485873222351074]
>

iex> Axon.Activations.softplus(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [0.3125, 0.1259765625, 0.04833984375],
    [1.3125, 2.125, 3.046875]
  ]
>
@@ -1394,20 +1394,20 @@ softsign(x)
Examples
iex> Axon.Activations.softsign(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-0.75, -0.6666666865348816, -0.5, 0.0, 0.5, 0.6666666865348816, 0.75]
>

iex> Axon.Activations.softsign(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-0.5, -0.6640625, -0.75],
    [0.5, 0.6640625, 0.75]
  ]
>
@@ -1437,20 +1437,20 @@ tanh(x)
Examples
iex> Axon.Activations.tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
  f32[data: 7]
  [-0.9950547814369202, -0.9640275835990906, -0.7615941762924194, 0.0, 0.7615941762924194, 0.9640275835990906, 0.9950547814369202]
>

iex> Axon.Activations.tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
  bf16[batch: 2][data: 3]
  [
    [-0.7578125, -0.9609375, -0.9921875],
    [0.7578125, 0.9609375, 0.9921875]
  ]
>
diff --git a/Axon.CompileError.html b/Axon.CompileError.html
index 9cc731ec..71287577 100644
--- a/Axon.CompileError.html
+++ b/Axon.CompileError.html
@@ -14,7 +14,7 @@
-
+
diff --git a/Axon.Display.html b/Axon.Display.html
index 9bc6df8f..6f636321 100644
--- a/Axon.Display.html
+++ b/Axon.Display.html
@@ -14,7 +14,7 @@
-
+
@@ -220,7 +220,7 @@ as_graph(axon, input_templates, opts \\ [])
Examples
Given an Axon model:
model = Axon.input("input") |> Axon.dense(32)
You can define input templates for each input:
input = Nx.template({1, 16}, :f32)
And then display the execution flow of the model:
Axon.Display.as_graph(model, input, direction: :top_down)
@@ -250,7 +250,7 @@ as_table(axon, input_templates)
Examples
Given an Axon model:
model = Axon.input("input") |> Axon.dense(32)
You can define input templates for each input:
input = Nx.template({1, 16}, :f32)
And then display the execution flow of the model:
Axon.Display.as_table(model, input)
diff --git a/Axon.Initializers.html b/Axon.Initializers.html
index fcf388cd..16bfc1bc 100644
--- a/Axon.Initializers.html
+++ b/Axon.Initializers.html
@@ -14,7 +14,7 @@
-
+
@@ -153,8 +153,8 @@
small enough to avoid exploding values. The initializers in
this module have a default scale known to work well with
the initialization strategy.
The functions in this module return initialization functions which
take shapes and types and return tensors:
init_fn = Axon.Initializers.zeros()
init_fn.({1, 2}, {:f, 32})
You may use these functions from within defn or outside.
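As a quick, hypothetical illustration of that pattern (the {4, 8} shape and the seed 42 are assumptions for the sketch), a random initializer is applied by passing a shape, a type, and a PRNG key, as the doctests below show:
# A minimal sketch; shape and seed are illustrative assumptions.
init_fn = Axon.Initializers.glorot_uniform()
kernel = init_fn.({4, 8}, {:f, 32}, Nx.Random.key(42))
Nx.shape(kernel)
#=> {4, 8}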
@@ -349,16 +349,16 @@ full(value)
Examples
iex> init_fn = Axon.Initializers.full(1.00)
iex> out = init_fn.({2, 2}, {:f, 32})
iex> out
#Nx.Tensor<
  f32[2][2]
  [
    [1.0, 1.0],
    [1.0, 1.0]
  ]
>
@@ -397,19 +397,19 @@ glorot_normal(opts \\ [])
Examples
iex> init_fn = Axon.Initializers.glorot_normal()
iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}

iex> init_fn = Axon.Initializers.glorot_normal(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
@@ -454,19 +454,19 @@ glorot_uniform(opts \\ [])
Examples
iex> init_fn = Axon.Initializers.glorot_uniform()
iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}

iex> init_fn = Axon.Initializers.glorot_uniform(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
@@ -510,19 +510,19 @@ he_normal(opts \\ [])
Examples
iex> init_fn = Axon.Initializers.he_normal()
iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}

iex> init_fn = Axon.Initializers.he_normal(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
@@ -566,19 +566,19 @@ he_uniform(opts \\ [])
Examples
iex> init_fn = Axon.Initializers.he_uniform()
iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}

iex> init_fn = Axon.Initializers.he_uniform(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
@@ -612,16 +612,16 @@ identity()
Examples
iex> init_fn = Axon.Initializers.identity()
iex> out = init_fn.({2, 2}, {:f, 32})
iex> out
#Nx.Tensor<
  f32[2][2]
  [
    [1.0, 0.0],
    [0.0, 1.0]
  ]
>
@@ -659,19 +659,19 @@ lecun_normal(opts \\ [])
Examples
iex> init_fn = Axon.Initializers.lecun_normal()
iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}

iex> init_fn = Axon.Initializers.lecun_normal(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
@@ -715,19 +715,19 @@ lecun_uniform(opts \\ [])
Examples
iex> init_fn = Axon.Initializers.lecun_uniform()
iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}

iex> init_fn = Axon.Initializers.lecun_uniform(scale: 1.0e-3)
iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
@@ -769,19 +769,19 @@ normal(opts \\ [])
Examples
iex> init_fn = Axon.Initializers.normal()
iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}

iex> init_fn = Axon.Initializers.normal(mean: 1.0, scale: 1.0)
iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
@@ -809,16 +809,16 @@ ones()
Examples
iex> init_fn = Axon.Initializers.ones()
iex> out = init_fn.({2, 2}, {:f, 32})
iex> out
#Nx.Tensor<
  f32[2][2]
  [
    [1.0, 1.0],
    [1.0, 1.0]
  ]
>
@@ -857,19 +857,19 @@ orthogonal(opts \\ [])
Examples
iex> init_fn = Axon.Initializers.orthogonal()
iex> t = init_fn.({3, 3}, {:f, 32}, Nx.Random.key(1))
iex> Nx.type(t)
{:f, 32}
iex> Nx.shape(t)
{3, 3}

iex> init_fn = Axon.Initializers.orthogonal()
iex> t = init_fn.({1, 2, 3, 4}, {:f, 64}, Nx.Random.key(1))
iex> Nx.type(t)
{:f, 64}
iex> Nx.shape(t)
{1, 2, 3, 4}
@@ -957,26 +957,26 @@ variance_scaling(opts \\ [])
Examples
iex> init_fn = Axon.Initializers.variance_scaling()
iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}

iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :truncated_normal)
iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}

iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :normal)
iex> t = init_fn.({64, 3, 32, 32}, {:f, 32}, Nx.Random.key(1))
iex> Nx.shape(t)
{64, 3, 32, 32}
iex> Nx.type(t)
{:f, 32}
@@ -1004,16 +1004,16 @@ zeros()
Examples
iex> init_fn = Axon.Initializers.zeros()
iex> out = init_fn.({2, 2}, {:f, 32})
iex> out
#Nx.Tensor<
  f32[2][2]
  [
    [0.0, 0.0],
    [0.0, 0.0]
  ]
>
diff --git a/Axon.Layers.html b/Axon.Layers.html
index 726026f0..defaea03 100644
--- a/Axon.Layers.html
+++ b/Axon.Layers.html
@@ -14,7 +14,7 @@
-
+
@@ -141,16 +141,16 @@
These implementations do not assume the responsibility of
managing state - instead opting to delegate this responsibility
to the caller. Basic neural networks can be seen as a composition of functions:
input
|> dense(w1, b1)
|> relu()
|> dense(w2, b2)
|> softmax()
These kinds of models are often referred to as deep feedforward networks
or multilayer perceptrons (MLPs) because information flows forward
through the network with no feedback connections. Mathematically,
a feedforward network can be represented as:
$$
f(x) = f^{(3)}(f^{(2)}(f^{(1)}(x)))
$$
You can see a similar pattern emerge if we condense the call stack
in the previous example:
softmax(dense(relu(dense(input, w1, b1)), w2, b2))
The chain structure shown here is the most common structure used
in neural networks. You can consider each function $f^{(n)}$ as a
layer in the neural network - for example $f^{(2)}$ is the 2nd
layer in the network. The number of function calls in the
@@ -158,7 +158,7 @@
deep learning comes from. Neural networks are often written as the mapping:
$$
y = f(x; \theta)
$$
Where $x$ is the input to the neural network and $\theta$ are the
set of learned parameters. In Elixir, you would write this:
y = model(input, params)
From the previous example, params would represent the collection:
{w1, b1, w2, b2}
where w1 and w2 are layer kernels, and b1 and b2 are layer
biases.
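Putting these pieces together, here is a minimal, hypothetical version of y = model(input, params) written directly against the functional API (all shapes and the constant parameter values below are illustrative assumptions):
# A minimal sketch of input |> dense |> relu |> dense |> softmax;
# shapes and constants are illustrative assumptions.
input = Nx.iota({1, 4}, type: {:f, 32})
{w1, b1} = {Nx.broadcast(0.1, {4, 8}), Nx.broadcast(0.0, {8})}
{w2, b2} = {Nx.broadcast(0.1, {8, 3}), Nx.broadcast(0.0, {3})}

y =
  input
  |> Axon.Layers.dense(w1, b1)
  |> Axon.Activations.relu()
  |> Axon.Layers.dense(w2, b2)
  |> Axon.Activations.softmax()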
@@ -735,19 +735,19 @@ bilinear(input1, input2, kernel, bias \\ 0,
Examples
iex> inp1 = Nx.iota({3, 2}, type: {:f, 32})
iex> inp2 = Nx.iota({3, 4}, type: {:f, 32})
iex> kernel = Nx.iota({1, 2, 4}, type: {:f, 32})
iex> bias = Nx.tensor(1.0)
iex> Axon.Layers.bilinear(inp1, inp2, kernel, bias)
#Nx.Tensor<
  f32[3][1]
  [
    [39.0],
    [455.0],
    [1319.0]
  ]
>
@@ -777,7 +777,7 @@ dense(input, kernel, bias \\ 0, opts \\ [])
$$
y = xW^T + b
$$
A dense layer or fully connected layer transforms
the input using the given kernel matrix and bias
to compute:
Nx.dot(input, kernel) + bias
Typically, both kernel and bias are learnable
parameters trained using gradient-based optimization.
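As a quick sanity check of the formula against the first doctest below: the first input row is [1.0, 0.5, 1.0, 0.5], the kernel column is [0.2, 0.3, 0.5, 0.8], and the bias is 1.0, so
$$
1.0 \cdot 0.2 + 0.5 \cdot 0.3 + 1.0 \cdot 0.5 + 0.5 \cdot 0.8 + 1.0 = 2.25
$$
which matches the first output row.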
@@ -796,17 +796,17 @@ dense(input, kernel, bias \\ 0, opts \\ [])
Examples
iex> input = Nx.tensor([[1.0, 0.5, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0]], type: {:f, 32})
iex> kernel = Nx.tensor([[0.2], [0.3], [0.5], [0.8]], type: {:f, 32})
iex> bias = Nx.tensor([1.0], type: {:f, 32})
iex> Axon.Layers.dense(input, kernel, bias)
#Nx.Tensor<
  f32[2][1]
  [
    [2.25],
    [1.0]
  ]
>
@@ -846,37 +846,37 @@ embedding(input, kernel, arg3 \\ [])
Examples
iex> input = Nx.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
iex> kernels = Nx.tensor([
...>   [0.46299999952316284, 0.5562999844551086, 0.18170000612735748],
...>   [0.9801999926567078, 0.09780000150203705, 0.5333999991416931],
...>   [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
...>   [0.31929999589920044, 0.42250001430511475, 0.7865999937057495],
...>   [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
...>   [0.1898999959230423, 0.9311000108718872, 0.8356000185012817],
...>   [0.6383000016212463, 0.8794000148773193, 0.5282999873161316],
...>   [0.9523000121116638, 0.7597000002861023, 0.08250000327825546],
...>   [0.6622999906539917, 0.02329999953508377, 0.8205999732017517],
...>   [0.9855999946594238, 0.36419999599456787, 0.5372999906539917]
...> ])
iex> Axon.Layers.embedding(input, kernels)
#Nx.Tensor<
  f32[2][4][3]
  [
    [
      [0.9801999926567078, 0.09780000150203705, 0.5333999991416931],
      [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
      [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
      [0.1898999959230423, 0.9311000108718872, 0.8356000185012817]
    ],
    [
      [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
      [0.31929999589920044, 0.42250001430511475, 0.7865999937057495],
      [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
      [0.9855999946594238, 0.36419999599456787, 0.5372999906539917]
    ]
  ]
>
@@ -1303,33 +1303,33 @@ global_avg_pool(input, opts \\ [])
Examples
iex> Axon.Layers.global_avg_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)
#Nx.Tensor<
  f32[3][2]
  [
    [1.0, 4.0],
    [7.0, 10.0],
    [13.0, 16.0]
  ]
>

iex> Axon.Layers.global_avg_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), channels: :first, keep_axes: true)
#Nx.Tensor<
  f32[1][3][1][1]
  [
    [
      [
        [1.5]
      ],
      [
        [5.5]
      ],
      [
        [9.5]
      ]
    ]
  ]
>
@@ -1376,33 +1376,33 @@ global_lp_pool(input, opts \\ [])
Examples
iex> Axon.Layers.global_lp_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), norm: 1, channels: :first)
#Nx.Tensor<
  f32[3][2]
  [
    [3.0, 12.0],
    [21.0, 30.0],
    [39.0, 48.0]
  ]
>

iex> Axon.Layers.global_lp_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 16}), keep_axes: true, channels: :first)
#Nx.Tensor<
  f16[1][3][1][1]
  [
    [
      [
        [3.7421875]
      ],
      [
        [11.2265625]
      ],
      [
        [19.125]
      ]
    ]
  ]
>
@@ -1447,33 +1447,33 @@ global_max_pool(input, opts \\ [])
Examples
iex> Axon.Layers.global_max_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)
#Nx.Tensor<
  f32[3][2]
  [
    [2.0, 5.0],
    [8.0, 11.0],
    [14.0, 17.0]
  ]
>

iex> Axon.Layers.global_max_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), keep_axes: true, channels: :first)
#Nx.Tensor<
  f32[1][3][1][1]
  [
    [
      [
        [3.0]
      ],
      [
        [7.0]
      ],
      [
        [11.0]
      ]
    ]
  ]
>
@@ -1527,18 +1527,18 @@ lp_pool(input, opts \\ [])
Examples
iex> t = Nx.tensor([[[0.9450, 0.4684, 1.8146], [1.2663, 0.4354, -0.0781], [-0.4759, 0.3251, 0.8742]]], type: {:f, 32})
iex> Axon.Layers.lp_pool(t, kernel_size: 2, norm: 2, channels: :first)
#Nx.Tensor<
  f32[1][3][1]
  [
    [
      [1.0547149181365967],
      [1.3390626907348633],
      [0.5763426423072815]
    ]
  ]
>
@@ -1589,21 +1589,21 @@ max_pool(input, opts \\ [])
Examples
iex> t = Nx.tensor([[
...>   [0.051500000059604645, -0.7042999863624573, -0.32899999618530273],
...>   [-0.37130001187324524, 1.6191999912261963, -0.11829999834299088],
...>   [0.7099999785423279, 0.7282999753952026, -0.18639999628067017]]], type: {:f, 32})
iex> Axon.Layers.max_pool(t, kernel_size: 2, channels: :first)
#Nx.Tensor<
  f32[1][3][1]
  [
    [
      [0.051500000059604645],
      [1.6191999912261963],
      [0.7282999753952026]
    ]
  ]
>
@@ -1826,13 +1826,13 @@ flatten(input, opts \\ [])
Examples
iex> Axon.Layers.flatten(Nx.iota({1, 2, 2}, type: {:f, 32}))
#Nx.Tensor<
  f32[1][4]
  [
    [0.0, 1.0, 2.0, 3.0]
  ]
>
@@ -1878,28 +1878,28 @@ resize(input, opts \\ [])
Examples
iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})
iex> Axon.Layers.resize(img, size: {4, 4}, channels: :first)
#Nx.Tensor<
  f32[1][1][4][4]
  [
    [
      [
        [0.0, 1.0, 1.0, 2.0],
        [3.0, 4.0, 4.0, 5.0],
        [3.0, 4.0, 4.0, 5.0],
        [6.0, 7.0, 7.0, 8.0]
      ]
    ]
  ]
>
Error cases
iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})
iex> Axon.Layers.resize(img, size: {4, 4}, method: :foo)
** (ArgumentError) expected :method to be either of :nearest, :bilinear, :bicubic, :lanczos3, :lanczos5, got: :foo
@@ -1979,83 +1979,83 @@
One-dimensional convolution
iex> input = Nx.tensor([[[0.1294, -0.6638, 1.0251]], [[0.9182, 1.1512, -1.6149]]], type: {:f, 32})
iex> kernel = Nx.tensor([[[-1.5475, 1.2425]], [[0.1871, 0.5458]], [[-0.4488, 0.8879]]], type: {:f, 32})
iex> bias = Nx.tensor([0.7791, 0.1676, 1.5971], type: {:f, 32})
iex> Axon.Layers.conv(input, kernel, bias, channels: :first)
#Nx.Tensor<
  f32[2][3][2]
  [
    [
      [-0.24591797590255737, 3.08001708984375],
      [-0.1704912781715393, 0.6029025316238403],
      [0.9496372938156128, 2.80519962310791]
    ],
    [
      [0.7885514497756958, -3.0088953971862793],
      [0.9677201509475708, -0.4984228312969208],
      [2.207162380218506, -0.3534282445907593]
    ]
  ]
>
Two-dimensional convolution
iex> input = Nx.tensor([[[[-1.0476, -0.5041], [-0.9336, 1.5907]]]], type: {:f, 32})
iex> kernel = Nx.tensor([
...>   [[[0.7514, 0.7356], [1.3909, 0.6800]]],
...>   [[[-0.3450, 0.4551], [-0.6275, -0.9875]]],
...>   [[[1.8587, 0.4722], [0.6058, -1.0301]]]
...> ], type: {:f, 32})
iex> bias = Nx.tensor([1.9564, 0.2822, -0.5385], type: {:f, 32})
iex> Axon.Layers.conv(input, kernel, bias, channels: :first)
#Nx.Tensor<
  f32[1][3][1][1]
  [
    [
      [
        [0.5815491676330566]
      ],
      [
        [-0.5707762241363525]
      ],
      [
        [-4.927865028381348]
      ]
    ]
  ]
>
Three-dimensional convolution
iex> input = Nx.tensor([[[[[-0.6497], [1.0939]], [[-2.5465], [0.7801]]]]], type: {:f, 32})
iex> kernel = Nx.tensor([
...>   [[[[0.7390], [-0.0927]], [[-0.8675], [-0.9209]]]],
...>   [[[[-0.6638], [0.4341]], [[0.6368], [1.1846]]]]
...> ], type: {:f, 32})
iex> bias = Nx.tensor([-0.4101, 0.1776], type: {:f, 32})
iex> Axon.Layers.conv(input, kernel, bias, channels: :first)
#Nx.Tensor<
  f32[1][2][1][1][1]
  [
    [
      [
        [
          [0.49906185269355774]
        ]
      ],
      [
        [
          [0.38622811436653137]
        ]
      ]
    ]
  ]
>
@@ -2113,23 +2113,23 @@ conv_transpose(input, kernel, bias \\ 0, op
Examples
iex> input = Nx.iota({1, 3, 3}, type: {:f, 32})
iex> kernel = Nx.iota({6, 3, 2}, type: {:f, 32})
iex> bias = Nx.tensor(1.0, type: {:f, 32})
iex> Axon.Layers.conv_transpose(input, kernel, bias, channels: :first)
#Nx.Tensor<
  f32[1][6][4]
  [
    [
      [40.0, 79.0, 94.0, 43.0],
      [94.0, 205.0, 256.0, 133.0],
      [148.0, 331.0, 418.0, 223.0],
      [202.0, 457.0, 580.0, 313.0],
      [256.0, 583.0, 742.0, 403.0],
      [310.0, 709.0, 904.0, 493.0]
    ]
  ]
>
diff --git a/Axon.Loop.State.html b/Axon.Loop.State.html
index e0d3fc32..e70b01e3 100644
--- a/Axon.Loop.State.html
+++ b/Axon.Loop.State.html
@@ -14,7 +14,7 @@
-
+
@@ -133,16 +133,16 @@
Accumulated state in an Axon.Loop.
Loop state is a struct:

    %State{
      epoch: integer(),
      max_epoch: integer(),
      iteration: integer(),
      max_iteration: integer(),
      metrics: map(string(), container()),
      times: map(integer(), integer()),
      step_state: container(),
      handler_metadata: container()
    }

epoch is the current epoch, starting at 0, of the nested loop.
Defaults to 0.
max_epoch is the maximum number of epochs the loop should run
for. Defaults to 1.
iteration is the current iteration of the inner loop. In supervised
settings, this will be the current batch. Defaults to 0.
max_iteration is the maximum number of iterations the loop should
diff --git a/Axon.Loop.html b/Axon.Loop.html
index e954b64d..6260d82e 100644
--- a/Axon.Loop.html
+++ b/Axon.Loop.html
@@ -14,7 +14,7 @@
-
+
@@ -135,66 +135,66 @@
Abstraction for modeling a reduction of a dataset with an accumulated
state for a number of epochs.
Inspired heavily by PyTorch Ignite.
The main abstraction is the %Axon.Loop{} struct, which controls a nested
reduction of the form:

    Enum.reduce(1..max_epochs, state, fn epoch, state ->
      Enum.reduce(data, state, &batch_step/2)
    end)

data is assumed to be an Enumerable or Stream of input data which is
handled by a processing function, batch_step. The purpose of the loop
abstraction is to take away much of the boilerplate code used in solving machine
learning tasks. Tasks such as normalizing a dataset, hyperparameter optimization,
or training machine learning models boil down to writing one function:

    defn batch_step(batch, state) do
      # ...do something with batch...
      updated_state
    end

For tasks such as training a neural network, state will encapsulate things
such as model and optimizer state. For supervised learning tasks, batch_step
might look something like:

    defn batch_step({inputs, targets}, state) do
      %{parameters: params, optimizer_state: optim_state} = state

      gradients = grad(params, objective_fn.(&1, inputs, targets))
      {updates, new_optim_state} = optimizer.(optim_state, params, gradients)

      new_params = apply_updates(params, updates)

      %{parameters: new_params, optimizer_state: new_optim_state}
    end

batch_step takes a batch of {input, target} pairs and the current state,
and updates the model parameters based on the gradients received from some arbitrary
objective function. This function will run in a nested loop, iterating over the entire
dataset for N epochs before finally returning the trained model state. By defining
one function, we've created a training loop that works for most machine learning models.
In actuality, the loop abstraction accumulates a struct, %Axon.Loop.State{}, which looks
like (assuming container is a generic Elixir container of tensors, e.g. map, tuple, etc.):

    %Axon.Loop.State{
      epoch: integer(),
      max_epoch: integer(),
      iteration: integer(),
      max_iteration: integer(),
      metrics: map(string(), container()),
      times: map(integer(), integer()),
      step_state: container()
    }

batch_step takes in the batch and the step state field and returns a step_state,
which is a generic container of state accumulated at each iteration. The rest of the fields
in the state struct are updated automatically behind the scenes.
The loop must start from some initial step state, thus most tasks must also provide
an additional initialization function to provide some starting point for the step
state. For machine learning tasks, the initialization function will return things like
initial model parameters and optimizer state.
Typically, the final output of the loop is the accumulated final state; however, you
may optionally apply an output transform to extract specific values at the end of the
loop. For example, Axon.Loop.trainer/4 by default extracts trained model state:

    output_transform = fn state ->
      state.step_state[:model_state]
    end

Initialize and Step
The core of the Axon loop is the init and step functions. The initialization is an
arity-0 function which provides an initial step state:

    init = fn ->
      %{params: Axon.init(model)}
    end

While the step function is the batch_step function mentioned earlier:

    step = fn data, state ->
      new_state = # ...do something...
      new_state
    end

Note that any optimization and training anonymous functions that need to be used in the
batch_step function can be passed as extra arguments. For example:

    step_with_training_arguments = fn data, state, optimizer_update_fn, state_update_fn ->
      # ...do something...
    end

    step = &(step_with_training_arguments.(&1, &2, actual_optimizer_update_fn, actual_state_update_fn))
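To make the shape of these pieces concrete, here is a rough, self-contained sketch. The batch-counting step state is arbitrary, and the arity-2 init form mirrors the &default_init/2 default rather than a documented contract:

    # Hypothetical toy loop: the step state is a plain integer that
    # counts batches; init ignores its arguments and starts at 0.
    init = fn _initial_state, _opts -> 0 end
    step = fn _batch, count -> count + 1 end

    Axon.Loop.loop(step, init)
    |> Axon.Loop.run([:a, :b, :c], %{}, epochs: 2)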
@@ -202,27 +202,27 @@
Often you want to compute metrics associated with your training iterations.
To accomplish this, you can attach metrics to each Axon.Loop. Assuming a batch_step
function which looks like:

    defn batch_step({inputs, targets}, state) do
      %{parameters: params, optimizer_state: optim_state} = state

      gradients = grad(params, objective_fn.(&1, inputs, targets))
      {updates, new_optim_state} = optimizer.(optim_state, params, gradients)

      new_params = apply_updates(params, updates)

      # Shown for simplicity, you can optimize this by calculating preds
      # along with the gradient calculation
      preds = model_fn.(params, inputs)

      %{
        y_true: targets,
        y_pred: preds,
        parameters: new_params,
        optimizer_state: new_optim_state
      }
    end

You can attach metrics to this by using Axon.Loop.metric/4:

    Axon.Loop.loop(&batch_step/2)
    |> Axon.Loop.metric("Accuracy", :accuracy, fn %{y_true: y_, y_pred: y} -> [y_, y] end)
    |> Axon.Loop.run(data)

Because metrics work directly on step_state, you typically need to provide an output
transform to indicate which values should be passed to your metric function. By default,
Axon assumes a supervised training task with the fields :y_true and :y_pred present
in the step state. See Axon.Loop.metric/4 for more information.
Metrics will be tracked in the loop state using the user-provided key. Metrics integrate
@@ -234,24 +234,24 @@
Events and Handlers
You can instrument several points in the loop using event handlers. By default, several events
are fired when running a loop:

    events = [
      :started,             # After loop state initialization
      :epoch_started,       # On epoch start
      :iteration_started,   # On iteration start
      :iteration_completed, # On iteration complete
      :epoch_completed,     # On epoch complete
      :epoch_halted         # On epoch halt, if early halted
    ]

You can attach event handlers to events using Axon.Loop.handle_event/4:

    loop
    |> Axon.Loop.handle_event(:iteration_completed, &log_metrics/1, every: 100)
    |> Axon.Loop.run(data)

The above will trigger log_metrics/1 every 100 times the :iteration_completed event
is fired. Event handlers must return a tuple {status, state}, where status is an
atom with one of the following values:

    :continue    # Continue epoch, continue looping
    :halt_epoch  # Halt the epoch, continue looping
    :halt_loop   # Halt looping

And state is an updated Axon.Loop.State struct. Handler functions take as input
the current loop state.
It's important to note that event handlers are triggered in the order they are attached
to the loop. If you have two handlers on the same event, they will trigger in order:

    loop
    |> Axon.Loop.handle_event(:epoch_completed, &normalize_state/1) # Runs first
    |> Axon.Loop.handle_event(:epoch_completed, &log_state/1)       # Runs second

You may provide filters to filter when event handlers trigger. See Axon.Loop.handle_event/4
for more details on valid filters.
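As a minimal sketch of the {status, state} contract, a handler that stops everything after a fixed number of epochs might look like this (halt_after_five is an illustrative name, not an Axon function):

    # Halts the entire loop after five completed epochs; otherwise
    # signals the loop to continue as normal.
    halt_after_five = fn %Axon.Loop.State{epoch: epoch} = state ->
      if epoch >= 5 do
        {:halt_loop, state}
      else
        {:continue, state}
      end
    end

    loop
    |> Axon.Loop.handle_event(:epoch_completed, halt_after_five)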
@@ -267,7 +267,7 @@
Running loops
In order to execute a loop, you should use Axon.Loop.run/3:

    Axon.Loop.run(loop, data, epochs: 10)
@@ -275,8 +275,8 @@
At times you may want to resume a loop from some previous state. You can accomplish this
with Axon.Loop.from_state/2:

    loop
    |> Axon.Loop.from_state(state)
    |> Axon.Loop.run(data)
@@ -511,7 +511,7 @@
checkpoint(loop, opts \\ [])
-
+
View Source
@@ -528,21 +528,21 @@ checkpoint(loop, opts \\ [])
obtained from Axon.Loop.serialize_state/2. Serialization
options will be forwarded to Axon.Loop.serialize_state/2. You can customize
checkpoint events by passing :event and :filter options:

    loop
    |> Axon.Loop.checkpoint(event: :iteration_completed, filter: [every: 50])

Checkpoints are saved under the checkpoint/ directory with a pattern
of checkpoint_{epoch}_{iteration}.ckpt. You can customize the path and pattern
with the :path and :file_pattern options:

    my_file_pattern =
      fn %Axon.Loop.State{epoch: epoch, iteration: iter} ->
        "checkpoint_#{epoch}_#{iter}"
      end

    loop
    |> Axon.Loop.checkpoint(path: "my_checkpoints", file_pattern: my_file_pattern)

If you'd like to only save checkpoints based on some metric criteria,
you can specify the :criteria option. :criteria must be a valid key
in metrics:

    loop
    |> Axon.Loop.checkpoint(criteria: "validation_loss")

The default criteria mode is :min, meaning the min score metric will
be considered "best" when deciding to save on a given event. Valid modes
are :min and :max:

    loop
    |> Axon.Loop.checkpoint(criteria: "validation_accuracy", mode: :max)
@@ -567,7 +567,7 @@ checkpoint(loop, opts \\ [])
deserialize_state(serialized, opts \\ [])
-
+
View Source
@@ -596,7 +596,7 @@ deserialize_state(serialized, opts \\ [])
early_stop(loop, monitor, opts \\ [])
-
+
View Source
@@ -611,18 +611,18 @@ early_stop(loop, monitor, opts \\ [])
improvement of a given metric. You must specify a metric to monitor and the metric must
be present in the loop state. Typically, this will be a validation metric:

    model
    |> Axon.Loop.trainer(loss, optim)
    |> Axon.Loop.metric(:accuracy)
    |> Axon.Loop.validate(model, val_data)
    |> Axon.Loop.early_stop("validation_accuracy")

It's important to remember that handlers are executed in the
order they are added to the loop. For example, if you'd like
to checkpoint a loop after every epoch and use early stopping,
most likely you want to add the checkpoint handler before
the early stopping handler:

    model
    |> Axon.Loop.trainer(loss, optim)
    |> Axon.Loop.metric(:accuracy)
    |> Axon.Loop.checkpoint()
    |> Axon.Loop.early_stop("accuracy")

That will ensure checkpoint is always fired, even if the loop
exits early.
@@ -635,7 +635,7 @@ early_stop(loop, monitor, opts \\ [])
eval_step(model)
-
+
View Source
@@ -660,7 +660,7 @@ eval_step(model)
evaluator(model)
-
+
View Source
@@ -673,18 +673,18 @@ evaluator(model)
Creates a supervised evaluator from a model.
An evaluator can be used for things such as testing and validation of models
after or during training. It assumes model is an Axon struct, container of
structs, or a tuple of init / apply functions. model_state must be a
container usable from within model.
The evaluator returns a step state of the form:

    %{
      y_true: labels,
      y_pred: predictions
    }

Such that you can attach any number of supervised metrics to the evaluation
loop:

    model
    |> Axon.Loop.evaluator()
    |> Axon.Loop.metric("Accuracy", :accuracy)

You must pass a compatible trained model state to Axon.Loop.run/4 when using
supervised evaluation loops. For example, if you've bound the result of a training
run to trained_model_state, you can run the trained model through an evaluation
run like this:

    model
    |> Axon.Loop.evaluator()
    |> Axon.Loop.run(data, trained_model_state, compiler: EXLA)

This function applies an output transform which returns the map of metrics accumulated
over the given loop.
@@ -697,7 +697,7 @@ evaluator(model)
from_state(loop, state)
-
+
View Source
@@ -709,7 +709,7 @@ from_state(loop, state)
Attaches state to the given loop in order to resume looping
from a previous state.
It's important to note that a loop's attached state takes precedence
over defined initialization functions. Given initialization function:

    defn init_state(), do: %{foo: 1, bar: 2}

And an attached state:

    state = %State{step_state: %{foo: 2, bar: 3}}

init_state/0 will never execute, and instead the initial step state
of %{foo: 2, bar: 3} will be used.
@@ -724,7 +724,7 @@ from_state(loop, state)
handle_event(loop, event, handler, filter \\ :always)
-
+
View Source
@@ -736,20 +736,20 @@ handle_event(loop, event, handler, filter \
Adds a handler function to the loop which will be triggered on event
with an optional filter.
Events take place at different points during loop execution. The default
events are:

    events = [
      :started,             # After loop state initialization
      :epoch_started,       # On epoch start
      :iteration_started,   # On iteration start
      :iteration_completed, # On iteration complete
      :epoch_completed,     # On epoch complete
      :epoch_halted         # On epoch halt, if early halted
    ]

Generally, event handlers are side-effecting operations which provide some
sort of inspection into the loop's progress. It's important to note that
if you define multiple handlers to be triggered on the same event, they
will execute in order from when they were attached to the training
loop:

    loop
    |> Axon.Loop.handle_event(:epoch_started, &normalize_step_state/1) # executes first
    |> Axon.Loop.handle_event(:epoch_started, &log_step_state/1)       # executes second

Thus, if you have separate handlers which alter or depend on loop state,
you need to ensure they are ordered correctly, or combined into a single
event handler for maximum control over execution.
event must be an atom representing the event to trigger handler or a
list of atoms indicating handler should be triggered on multiple events.
@@ -779,7 +779,7 @@
handle_event(loop, event, handler, filter \
kino_vega_lite_plot(loop, plot, metric, opts \\ [])
-
+
View Source
@@ -790,16 +790,16 @@ kino_vega_lite_plot(loop, plot, metric, opt
Adds a handler function which updates a Kino.VegaLite plot.
By default, this will run after every iteration.
You must specify a plot to push to and a metric to track. The :x axis will be
the iteration count, labeled "step". The metric must match the name given to
the :y axis in your VegaLite plot:

    plot =
      Vl.new()
      |> Vl.mark(:line)
      |> Vl.encode_field(:x, "step", type: :quantitative)
      |> Vl.encode_field(:y, "loss", type: :quantitative)
      |> Kino.VegaLite.new()
      |> Kino.render()

    model
    |> Axon.Loop.trainer(loss, optim)
    |> Axon.Loop.kino_vega_lite_plot(plot, "loss")
@@ -819,7 +819,7 @@ kino_vega_lite_plot(loop, plot, metric, opt
log(loop, message_fn, opts \\ [])
-
+
View Source
@@ -852,7 +852,7 @@ log(loop, message_fn, opts \\ [])
loop(step_fn, init_fn \\ &default_init/2, output_transform \\ & &1)
-
+
View Source
@@ -864,13 +864,13 @@ loop(step_fn, init_fn \\ &default_init/
Creates a loop from step_fn, an optional init_fn, and an
optional output_transform.
step_fn is an arity-2 function which takes a batch and state
and returns an updated step state:

    defn batch_step(batch, step_state) do
      step_state + 1
    end

init_fn by default is an identity function which forwards its
initial arguments as the model state. You should define a custom
initialization function if you require a different behavior:

    defn init_step_state(state) do
      Map.merge(%{foo: 1}, state)
    end

You may use state in conjunction with initialization functions in
init_fn. For example, train_step/3 uses initial state as initial
model parameters to allow initializing models from partial parameterizations.
step_batch/2 and init_step_state/1 are typically called from
within Nx.Defn.jit/3. While JIT-compilation will work with anonymous functions,
@@ -895,7 +895,7 @@
loop(step_fn, init_fn \\ &default_init/
metric(loop, metric, name \\ nil, accumulate \\ :running_average, transform_or_fields \\ [:y_true, :y_pred])
-
+
View Source
@@ -908,20 +908,20 @@ metric(loop, metric, name \\ nil, accumulat
Adds a metric of the given name to the loop.
A metric is a function which tracks or measures some value with respect
to values in the step state. For example, when training classification
models, it's common to track the model's accuracy during training:

    loop
    |> Axon.Loop.metric(:accuracy, "Accuracy")

By default, metrics assume a supervised learning task and extract the fields
[:y_true, :y_pred] from the step state. If you wish to work on a different
value, you can use an output transform. An output transform is a list of keys
to extract from the output state, or a function which returns a flattened list
of values to pass to the given metric function. Values received from output
transforms are passed to the given metric using:

    value = output_transform.(step_state)
    apply(metric, value)

Thus, even if you want your metric to work on a container, your output transform
must return a list.
metric must be an atom which matches the name of a metric in Axon.Metrics, or
an arbitrary function which returns a tensor or container.
name must be a string or atom used to store the computed metric in the loop
state. If names conflict, the last attached metric will take precedence:

    loop
    |> Axon.Loop.metric(:mean_squared_error, "Error") # Will be overwritten
    |> Axon.Loop.metric(:mean_absolute_error, "Error") # Will be used

By default, metrics keep a running average of the metric calculation. You can
override this behavior by changing accumulate:

    loop
    |> Axon.Loop.metric(:true_negatives, "tn", :running_sum)

The accumulation function can be one of the accumulation combinators in Axon.Metrics
or an arity-3 function of the form: accumulate(acc, obs, i) :: new_acc.
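For instance, a running mean written out in the arity-3 form might look like this (running_avg is an illustrative name, not part of Axon.Metrics):

    # acc is the mean so far, obs the new observation, and i the
    # zero-based index of the observation being folded in.
    running_avg = fn acc, obs, i ->
      Nx.divide(Nx.add(Nx.multiply(acc, i), obs), Nx.add(i, 1))
    end

    loop
    |> Axon.Loop.metric(:accuracy, "Accuracy", running_avg)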
@@ -936,7 +936,7 @@ metric(loop, metric, name \\ nil, accumulat
monitor(loop, metric, fun, name, opts \\ [])
-
+
View Source
@@ -981,7 +981,7 @@ monitor(loop, metric, fun, name, opts \\ []
reduce_lr_on_plateau(loop, monitor, opts \\ [])
-
+
View Source
@@ -997,10 +997,10 @@ reduce_lr_on_plateau(loop, monitor, opts \\
improvement of a given metric. You must specify a metric to monitor and the metric must
be present in the loop state. Typically, this will be
a validation metric:
    model
    |> Axon.Loop.trainer(loss, optim)
    |> Axon.Loop.metric(:accuracy)
    |> Axon.Loop.validate(model, val_data)
    |> Axon.Loop.reduce_lr_on_plateau("accuracy", mode: :max)
@@ -1024,7 +1024,7 @@ reduce_lr_on_plateau(loop, monitor, opts \\
run(loop, data, init_state \\ %{}, opts \\ [])
-
+
View Source
@@ -1049,7 +1049,9 @@ run(loop, data, init_state \\ %{}, opts \\
to true.
:garbage_collect - whether or not to garbage collect after
each loop iteration. This may prevent OOMs, but it will slow down training.
:strict? - whether or not to compile step functions strictly. If this flag
is set, the loop will raise on any cache miss during the training loop. Defaults
to true.
:force_garbage_collect? - whether or not to force garbage collection after each
iteration. This may help avoid OOMs when training large models, but it will slow
training down.
:debug - run loop in debug mode to trace loop progress. Defaults to
false.
Additional options are forwarded to Nx.Defn.jit as JIT-options. If no JIT
options are set, the default options set with Nx.Defn.default_options are
used.
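Putting a few of these together, a run compiled with EXLA and traced in debug mode might look like the following sketch (EXLA here is just one example of a JIT option):

    model
    |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    |> Axon.Loop.run(data, %{}, epochs: 5, compiler: EXLA, debug: true)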
@@ -1066,7 +1068,7 @@ run(loop, data, init_state \\ %{}, opts \\
serialize_state(state, opts \\ [])
-
+
View Source
@@ -1098,7 +1100,7 @@ serialize_state(state, opts \\ [])
train_step(model, loss, optimizer, opts \\ [])
-
+
View Source
@@ -1155,7 +1157,7 @@ train_step(model, loss, optimizer, opts \\
trainer(model, loss, optimizer, opts \\ [])
-
+
View Source
@@ -1186,13 +1188,13 @@ trainer(model, loss, optimizer, opts \\ [])
arity-3 function which scales gradient updates with respect to input parameters,
optimizer state, and gradients. See Polaris.Updates for more information on building
optimizers. This function creates a step function which outputs a map consisting of the following
fields for step_state:

    %{
      y_pred: tensor() | container(tensor()),  # Model predictions for use in metrics
      y_true: tensor() | container(tensor()),  # True labels for use in metrics
      loss: tensor(),                          # Running average of loss over epoch
      model_state: container(tensor()),        # Model parameters and state
      optimizer_state: container(tensor())     # Optimizer state associated with each parameter
    }
@@ -1204,42 +1206,42 @@
Basic usage
    data = Stream.zip(input, target)

    model = Axon.input("input", shape: {nil, 32}) |> Axon.dense(1, activation: :sigmoid)

    model
    |> Axon.Loop.trainer(:binary_cross_entropy, :adam)
    |> Axon.Loop.run(data)

Customizing Optimizer

    model
    |> Axon.Loop.trainer(:binary_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.05))
    |> Axon.Loop.run(data)

Custom loss

    loss_fn = fn y_true, y_pred -> Nx.cos(Nx.subtract(y_true, y_pred)) end

    model
    |> Axon.Loop.trainer(loss_fn, Polaris.Optimizers.rmsprop(learning_rate: 0.01))
    |> Axon.Loop.run(data)

Multiple objectives with multi-output model

    model = {Axon.input("input_0", shape: {nil, 1}), Axon.input("input_1", shape: {nil, 2})}
    loss_weights = [mean_squared_error: 0.5, mean_absolute_error: 0.5]

    model
    |> Axon.Loop.trainer(loss_weights, :sgd)
    |> Axon.Loop.run(data)
@@ -1266,7 +1268,7 @@
validate(loop, model, validation_data, opts \\ [])
-
+
View Source
@@ -1280,25 +1282,25 @@ validate(loop, model, validation_data, opts
against the given validation set. This handler assumes the loop state matches the state initialized
in a supervised training loop. Typically, you'd call this immediately
after creating a supervised training loop:

    model
    |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    |> Axon.Loop.validate(model, validation_data)

Please note that you must pass the same (or an equivalent) model
into this method so it can be used during the validation loop. The
metrics which are computed are those which are present BEFORE the
validation handler was added to the loop. For the following loop:

    model
    |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    |> Axon.Loop.metric(:mean_absolute_error)
    |> Axon.Loop.validate(model, validation_data)
    |> Axon.Loop.metric(:binary_cross_entropy)

only :mean_absolute_error will be computed at validation time.
The returned loop state is altered to contain validation
metrics for use in later handlers such as early stopping and model
checkpoints. Since the order of execution of event handlers is in
the same order they are declared in the training loop, you MUST call
this method before any other handler which expects or may use
validation metrics.
By default the validation loop runs after every epoch; however, you
can customize it by overriding the default event and event filters:

    model
    |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    |> Axon.Loop.metric(:mean_absolute_error)
    |> Axon.Loop.validate(model, validation_data, event: :iteration_completed, filter: [every: 10_000])
    |> Axon.Loop.metric(:binary_cross_entropy)
diff --git a/Axon.LossScale.html b/Axon.LossScale.html
index 3f7303b1..93f9ea83 100644
--- a/Axon.LossScale.html
+++ b/Axon.LossScale.html
@@ -14,7 +14,7 @@
-
+
@@ -136,7 +136,7 @@
Implementations of loss-scalers for use in mixed precision
training.
Loss scaling is used to prevent underflow when using mixed
precision during the model training process. Each loss-scale
implementation here returns a 4-tuple of the functions:

    {init_fn, scale_fn, unscale_fn, adjust_fn} = Axon.LossScale.static(Nx.pow(2, 15))

You can use these to scale/unscale loss and gradients as well
as adjust the loss scale state.
Axon.Loop.trainer/3 builds loss-scaling in by default. You
can reference the Axon.Loop.train_step/3 implementation to
see how loss-scaling is applied in practice.
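A rough sketch of how the returned functions might be threaded through a training step follows; the exact arities here are assumptions for illustration, not the documented API:

    # Hypothetical usage: scale the loss before differentiation so small
    # gradients survive low-precision arithmetic, then unscale the
    # gradients before applying updates.
    {init_fn, scale_fn, unscale_fn, _adjust_fn} = Axon.LossScale.static(Nx.pow(2, 15))
    loss_scale_state = init_fn.()
    scaled_loss = scale_fn.(loss, loss_scale_state)
    unscaled_grads = unscale_fn.(grads, loss_scale_state)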
diff --git a/Axon.Losses.html b/Axon.Losses.html
index 33943727..3685fbd0 100644
--- a/Axon.Losses.html
+++ b/Axon.Losses.html
@@ -14,7 +14,7 @@
-
+
@@ -140,31 +140,31 @@
measuring the loss with respect to the input target y_true
and input prediction y_pred. As an example, the mean_squared_error/2
loss function produces a tensor whose values are the mean squared
error between targets and predictions:

    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    iex> Axon.Losses.mean_squared_error(y_true, y_pred)
    #Nx.Tensor<
      f32[2]
      [0.5, 0.5]
    >

It's common to compute the loss across an entire minibatch.
You can easily do so by specifying a :reduction mode, or
by composing one of these with an Nx reduction method:

    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)
    #Nx.Tensor<
      f32
      0.5
    >

You can even compose loss functions:

    defn my_strange_loss(y_true, y_pred) do
      y_true
      |> Axon.Losses.mean_squared_error(y_pred)
      |> Axon.Losses.binary_cross_entropy(y_pred)
      |> Nx.sum()
    end

Or, more commonly, you can combine loss functions with penalties for
regularization:

    defn regularized_loss(params, y_true, y_pred) do
      loss = Axon.Losses.mean_squared_error(y_true, y_pred)
      penalty = l2_penalty(params)
      Nx.sum(loss) + penalty
    end

All of the functions in this module are implemented as
numerical functions and can be JIT or AOT compiled with
any supported Nx compiler.
@@ -444,29 +444,29 @@ binary_cross_entropy(y_true, y_pred, opts \
Examples
iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])
iex> Axon.Losses.binary_cross_entropy(y_true, y_pred)
#Nx.Tensor<
  f32[3]
  [0.8644826412200928, 0.5150600075721741, 0.45986634492874146]
>

iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])
iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :mean)
#Nx.Tensor<
  f32
  0.613136351108551
>

iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])
iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :sum)
#Nx.Tensor<
  f32
  1.8394089937210083
>
@@ -495,8 +495,8 @@ categorical_cross_entropy(y_true, y_pred, o
$$
Categorical cross-entropy is typically used for multi-class classification problems.
By default, it expects y_pred to encode a probability distribution along the last
axis. You can specify from_logits: true to indicate y_pred is a logits tensor.

    # Batch size of 3 with 3 target classes
    y_true = Nx.tensor([0, 2, 1])
    y_pred = Nx.tensor([[0.2, 0.8, 0.0], [0.1, 0.2, 0.7], [0.1, 0.2, 0.7]])
@@ -520,37 +520,37 @@ categorical_cross_entropy(y_true, y_pred, o
Examples
iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred)
#Nx.Tensor<
  f32[2]
  [0.051293306052684784, 2.3025851249694824]
>

iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :mean)
#Nx.Tensor<
  f32
  1.1769392490386963
>

iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum)
#Nx.Tensor<
  f32
  2.3538784980773926
>

iex> y_true = Nx.tensor([1, 2], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum, sparse: true)
#Nx.Tensor<
  f32
  2.3538784980773926
>
@@ -593,29 +593,29 @@ categorical_hinge(y_true, y_pred, opts \\ [
Examples
iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382, 0.08494169, 0.13442067]])
iex> Axon.Losses.categorical_hinge(y_true, y_pred)
#Nx.Tensor<
  f32[2]
  [1.6334158182144165, 1.2410175800323486]
>

iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382, 0.08494169, 0.13442067]])
iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :mean)
#Nx.Tensor<
  f32
  1.4372167587280273
>

iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382, 0.08494169, 0.13442067]])
iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :sum)
#Nx.Tensor<
  f32
  2.8744335174560547
>
@@ -710,13 +710,13 @@ cosine_similarity(y_true, y_pred, opts \\ [
Examples
iex> y_pred = Nx.tensor([[1.0, 0.0], [1.0, 1.0]])
iex> y_true = Nx.tensor([[0.0, 1.0], [1.0, 1.0]])
iex> Axon.Losses.cosine_similarity(y_true, y_pred)
#Nx.Tensor<
  f32[2]
  [0.0, 1.0000001192092896]
>
@@ -761,29 +761,29 @@ hinge(y_true, y_pred, opts \\ [])
Examples
iex> y_true = Nx.tensor([[1, 1, -1], [1, 1, -1]], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])
iex> Axon.Losses.hinge(y_true, y_pred)
#Nx.Tensor<
  f32[2]
  [0.9700339436531067, 0.6437881588935852]
>

iex> y_true = Nx.tensor([[1, 1, -1], [1, 1, -1]], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])
iex> Axon.Losses.hinge(y_true, y_pred, reduction: :mean)
#Nx.Tensor<
  f32
  0.806911051273346
>

iex> y_true = Nx.tensor([[1, 1, -1], [1, 1, -1]], type: {:s, 8})
iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])
iex> Axon.Losses.hinge(y_true, y_pred, reduction: :sum)
#Nx.Tensor<
  f32
  1.613822102546692
>
@@ -827,25 +827,25 @@ huber(y_true, y_pred, opts \\ [])
Examples
iex> y_true = Nx.tensor([[1], [1.5], [2.0]])
iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])
iex> Axon.Losses.huber(y_true, y_pred)
#Nx.Tensor<
  f32[3][1]
  [
    [0.019999997690320015],
    [0.04499998688697815],
    [0.004999990575015545]
  ]
>

iex> y_true = Nx.tensor([[1], [1.5], [2.0]])
iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])
iex> Axon.Losses.huber(y_true, y_pred, reduction: :mean)
#Nx.Tensor<
  f32
  0.02333332598209381
>
@@ -890,29 +890,29 @@ kl_divergence(y_true, y_pred, opts \\ [])
Examples
iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})
iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])
iex> Axon.Losses.kl_divergence(y_true, y_pred)
#Nx.Tensor<
  f32[2]
  [0.916289210319519, -3.080907390540233e-6]
>

iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})
iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])
iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :mean)
#Nx.Tensor<
  f32
  0.45814305543899536
>

iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})
iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])
iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :sum)
#Nx.Tensor<
  f32
  0.9162861108779907
>
@@ -988,29 +988,29 @@ log_cosh(y_true, y_pred, opts \\ [])
Examples
iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])
iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])
iex> Axon.Losses.log_cosh(y_true, y_pred)
#Nx.Tensor<
  f32[2]
  [0.2168903946876526, 0.0]
>

iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])
iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])
iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :mean)
#Nx.Tensor<
  f32
  0.1084451973438263
>

iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])
iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])
iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :sum)
#Nx.Tensor<
  f32
  0.2168903946876526
>
@@ -1049,32 +1049,32 @@ margin_ranking(y_true, arg2, opts \\ [])
Examples
iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})
iex> y_pred1 = Nx.tensor([0.6934, -0.7239, 1.1954], type: {:f, 32})
iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})
iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2})
#Nx.Tensor<
  f32[3]
  [0.0, 0.9909000396728516, 0.0]
>

iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})
iex> y_pred1 = Nx.tensor([0.6934, -0.7239, 1.1954], type: {:f, 32})
iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})
iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :mean)
#Nx.Tensor<
  f32
  0.3303000032901764
>

iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})
iex> y_pred1 = Nx.tensor([0.6934, -0.7239, 1.1954], type: {:f, 32})
iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})
iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :sum)
#Nx.Tensor<
  f32
  0.9909000396728516
>
@@ -1119,29 +1119,29 @@ mean_absolute_error(y_true, y_pred, opts \\
Examples
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
-iex> Axon.Losses.mean_absolute_error(y_true, y_pred)
-#Nx.Tensor<
-  f32[2]
-  [0.5, 0.5]
->
-
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
-iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :mean)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
+iex> Axon.Losses.mean_absolute_error(y_true, y_pred)
+#Nx.Tensor<
+  f32[2]
+  [0.5, 0.5]
+>
+
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
+iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :mean)
+#Nx.Tensor<
  f32
  0.5
->
+>
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
-iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :sum)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
+iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :sum)
+#Nx.Tensor<
  f32
  1.0
->
+>
@@ -1186,29 +1186,29 @@ mean_squared_error(y_true, y_pred, opts \\
Examples
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
-iex> Axon.Losses.mean_squared_error(y_true, y_pred)
-#Nx.Tensor<
-  f32[2]
-  [0.5, 0.5]
->
-
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
-iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
+iex> Axon.Losses.mean_squared_error(y_true, y_pred)
+#Nx.Tensor<
+  f32[2]
+  [0.5, 0.5]
+>
+
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
+iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)
+#Nx.Tensor<
  f32
  0.5
->
+>
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
-iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :sum)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
+iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :sum)
+#Nx.Tensor<
  f32
  1.0
->
+>
@@ -1253,29 +1253,29 @@ poisson(y_true, y_pred, opts \\ [])
Examples
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> Axon.Losses.poisson(y_true, y_pred)
-#Nx.Tensor<
-  f32[2]
-  [0.9999999403953552, 0.0]
->
-
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> Axon.Losses.poisson(y_true, y_pred, reduction: :mean)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> Axon.Losses.poisson(y_true, y_pred)
+#Nx.Tensor<
+  f32[2]
+  [0.9999999403953552, 0.0]
+>
+
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> Axon.Losses.poisson(y_true, y_pred, reduction: :mean)
+#Nx.Tensor<
  f32
  0.4999999701976776
->
+>
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> Axon.Losses.poisson(y_true, y_pred, reduction: :sum)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> Axon.Losses.poisson(y_true, y_pred, reduction: :sum)
+#Nx.Tensor<
  f32
  0.9999999403953552
->
+>
@@ -1314,29 +1314,29 @@ soft_margin(y_true, y_pred, opts \\ [])
Examples
-iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})
-iex> Axon.Losses.soft_margin(y_true, y_pred)
-#Nx.Tensor<
-  f32[3]
-  [0.851658046245575, 0.7822436094284058, 0.3273470401763916]
->
-
-iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})
-iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :mean)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})
+iex> Axon.Losses.soft_margin(y_true, y_pred)
+#Nx.Tensor<
+  f32[3]
+  [0.851658046245575, 0.7822436094284058, 0.3273470401763916]
+>
+
+iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})
+iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :mean)
+#Nx.Tensor<
  f32
  0.6537495255470276
->
+>
-iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})
-iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :sum)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})
+iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :sum)
+#Nx.Tensor<
  f32
  1.9612486362457275
->
+>
diff --git a/Axon.Metrics.html b/Axon.Metrics.html
index 34d16965..12a87199 100644
--- a/Axon.Metrics.html
+++ b/Axon.Metrics.html
@@ -14,7 +14,7 @@
-
+
@@ -360,23 +360,23 @@ accuracy(y_true, y_pred, opts \\ [])
Examples
-iex> Axon.Metrics.accuracy(Nx.tensor([[1], [0], [0]]), Nx.tensor([[1], [1], [1]]))
-#Nx.Tensor<
+iex> Axon.Metrics.accuracy(Nx.tensor([[1], [0], [0]]), Nx.tensor([[1], [1], [1]]))
+#Nx.Tensor<
  f32
  0.3333333432674408
->
+>
-iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1], [1, 0], [1, 0]]), Nx.tensor([[0, 1], [1, 0], [0, 1]]))
-#Nx.Tensor<
+iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1], [1, 0], [1, 0]]), Nx.tensor([[0, 1], [1, 0], [0, 1]]))
+#Nx.Tensor<
  f32
  0.6666666865348816
->
+>
-iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0, 1, 0], [0, 1, 0]]))
-#Nx.Tensor<
+iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0, 1, 0], [0, 1, 0]]))
+#Nx.Tensor<
  f32
  0.5
->
+>
@@ -480,13 +480,13 @@ false_positives(y_true, y_pred, opts \\ [])
Examples
-iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
-iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
-iex> Axon.Metrics.false_positives(y_true, y_pred)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
+iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
+iex> Axon.Metrics.false_positives(y_true, y_pred)
+#Nx.Tensor<
  u64
  2
->
+>
@@ -523,13 +523,13 @@ mean_absolute_error(y_true, y_pred)
Examples
-iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
-iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
-iex> Axon.Metrics.mean_absolute_error(y_true, y_pred)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
+iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
+iex> Axon.Metrics.mean_absolute_error(y_true, y_pred)
+#Nx.Tensor<
  f32
  0.5
->
+>
@@ -573,11 +573,11 @@ precision(y_true, y_pred, opts \\ [])
Examples
-iex> Axon.Metrics.precision(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
-#Nx.Tensor<
+iex> Axon.Metrics.precision(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
+#Nx.Tensor<
  f32
  0.6666666865348816
->
+>
@@ -621,11 +621,11 @@ recall(y_true, y_pred, opts \\ [])
Examples
-iex> Axon.Metrics.recall(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
-#Nx.Tensor<
+iex> Axon.Metrics.recall(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
+#Nx.Tensor<
  f32
  0.6666666865348816
->
+>
@@ -656,14 +656,14 @@ running_average(metric)
iex> cur_avg = 0.5
iex> iteration = 1
-iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
-iex> y_pred = Nx.tensor([[0, 1], [1, 0], [1, 0]])
-iex> avg_acc = Axon.Metrics.running_average(&Axon.Metrics.accuracy/2)
-iex> avg_acc.(cur_avg, [y_true, y_pred], iteration)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
+iex> y_pred = Nx.tensor([[0, 1], [1, 0], [1, 0]])
+iex> avg_acc = Axon.Metrics.running_average(&Axon.Metrics.accuracy/2)
+iex> avg_acc.(cur_avg, [y_true, y_pred], iteration)
+#Nx.Tensor<
  f32
  0.75
->
+>
@@ -694,14 +694,14 @@ running_sum(metric)
iex> cur_sum = 12
iex> iteration = 2
-iex> y_true = Nx.tensor([0, 1, 0, 1])
-iex> y_pred = Nx.tensor([1, 1, 0, 1])
-iex> fps = Axon.Metrics.running_sum(&Axon.Metrics.false_positives/2)
-iex> fps.(cur_sum, [y_true, y_pred], iteration)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([0, 1, 0, 1])
+iex> y_pred = Nx.tensor([1, 1, 0, 1])
+iex> fps = Axon.Metrics.running_sum(&Axon.Metrics.false_positives/2)
+iex> fps.(cur_sum, [y_true, y_pred], iteration)
+#Nx.Tensor<
  s64
  13
->
+>
@@ -745,11 +745,11 @@ sensitivity(y_true, y_pred, opts \\ [])
Examples
-iex> Axon.Metrics.sensitivity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
-#Nx.Tensor<
+iex> Axon.Metrics.sensitivity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
+#Nx.Tensor<
  f32
  0.6666666865348816
->
+>
@@ -793,11 +793,11 @@ specificity(y_true, y_pred, opts \\ [])
Examples
-iex> Axon.Metrics.specificity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
-#Nx.Tensor<
+iex> Axon.Metrics.specificity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
+#Nx.Tensor<
  f32
  0.0
->
+>
@@ -839,23 +839,23 @@ top_k_categorical_accuracy(y_true, y_pred,
Examples
-iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([0, 1, 0, 0, 0]), Nx.tensor([0.1, 0.4, 0.3, 0.7, 0.1]), k: 2)
-#Nx.Tensor<
+iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([0, 1, 0, 0, 0]), Nx.tensor([0.1, 0.4, 0.3, 0.7, 0.1]), k: 2)
+#Nx.Tensor<
  f32
  1.0
->
+>
-iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2)
-#Nx.Tensor<
+iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2)
+#Nx.Tensor<
  f32
  0.5
->
+>
-iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0], [2]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2, sparse: true)
-#Nx.Tensor<
+iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0], [2]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2, sparse: true)
+#Nx.Tensor<
  f32
  0.5
->
+>
@@ -893,13 +893,13 @@ true_negatives(y_true, y_pred, opts \\ [])<
Examples
-iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
-iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
-iex> Axon.Metrics.true_negatives(y_true, y_pred)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
+iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
+iex> Axon.Metrics.true_negatives(y_true, y_pred)
+#Nx.Tensor<
  u64
  1
->
+>
@@ -937,13 +937,13 @@ true_positives(y_true, y_pred, opts \\ [])<
Examples
-iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
-iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
-iex> Axon.Metrics.true_positives(y_true, y_pred)
-#Nx.Tensor<
+iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
+iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
+iex> Axon.Metrics.true_positives(y_true, y_pred)
+#Nx.Tensor<
  u64
  1
->
+>
diff --git a/Axon.MixedPrecision.html b/Axon.MixedPrecision.html
index 966c1035..c0fce94c 100644
--- a/Axon.MixedPrecision.html
+++ b/Axon.MixedPrecision.html
@@ -14,7 +14,7 @@
-
+
@@ -140,24 +140,24 @@
during intermediate computations in the model's forward pass. The output
policy dictates what type the model should output. Here's an example of creating a mixed precision policy and applying it
to a model:
model =
-  Axon.input("input", shape: {nil, 784})
-  |> Axon.dense(128, activation: :relu)
-  |> Axon.batch_norm()
-  |> Axon.dropout(rate: 0.5)
-  |> Axon.dense(64, activation: :relu)
-  |> Axon.batch_norm()
-  |> Axon.dropout(rate: 0.5)
-  |> Axon.dense(10, activation: :softmax)
-
-policy = Axon.MixedPrecision.create_policy(
-  params: {:f, 32},
-  compute: {:f, 16},
-  output: {:f, 32}
-)
+  Axon.input("input", shape: {nil, 784})
+  |> Axon.dense(128, activation: :relu)
+  |> Axon.batch_norm()
+  |> Axon.dropout(rate: 0.5)
+  |> Axon.dense(64, activation: :relu)
+  |> Axon.batch_norm()
+  |> Axon.dropout(rate: 0.5)
+  |> Axon.dense(10, activation: :softmax)
+
+policy = Axon.MixedPrecision.create_policy(
+  params: {:f, 32},
+  compute: {:f, 16},
+  output: {:f, 32}
+)

mp_model =
  model
-  |> Axon.MixedPrecision.apply_policy(policy, except: [:batch_norm])
+  |> Axon.MixedPrecision.apply_policy(policy, except: [:batch_norm])

The example above applies the mixed precision policy to every layer in
the model except Batch Normalization layers. The policy will cast parameters
and inputs to {:f, 16} for intermediate computations in the model's forward
pass before casting the output back to {:f, 32}.
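
As a rough usage sketch, the resulting mp_model then drops into the usual
training flow unchanged (the train_data stream of {input, label} batches
below is an assumption for illustration, not part of this page):

    # Train the mixed-precision model; the policy is already attached
    # to mp_model, so the loop needs no extra configuration.
    trained_state =
      mp_model
      |> Axon.Loop.trainer(:categorical_cross_entropy, :adam)
      |> Axon.Loop.run(train_data, %{}, epochs: 5)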
@@ -236,27 +236,27 @@ cast(policy, tensor_or_container, variable_
Examples
-iex> policy = Axon.MixedPrecision.create_policy(params: {:f, 16})
-iex> params = %{"dense" => %{"kernel" => Nx.tensor([1.0, 2.0, 3.0])}}
-iex> params = Axon.MixedPrecision.cast(policy, params, :params)
-iex> Nx.type(params["dense"]["kernel"])
-{:f, 16}
-
-iex> policy = Axon.MixedPrecision.create_policy(compute: {:bf, 16})
-iex> value = Nx.tensor([1.0, 2.0, 3.0])
-iex> value = Axon.MixedPrecision.cast(policy, value, :compute)
-iex> Nx.type(value)
-{:bf, 16}
-
-iex> policy = Axon.MixedPrecision.create_policy(output: {:bf, 16})
-iex> value = Nx.tensor([1.0, 2.0, 3.0])
-iex> value = Axon.MixedPrecision.cast(policy, value, :output)
-iex> Nx.type(value)
-{:bf, 16}
-
-Note that integers are never promoted to floats:
-
-iex> policy = Axon.MixedPrecision.create_policy(output: {:f, 16})
-iex> value = Nx.tensor([1, 2, 3], type: :s64)
-iex> value = Axon.MixedPrecision.cast(policy, value, :params)
-iex> Nx.type(value)
-{:s, 64}
+iex> policy = Axon.MixedPrecision.create_policy(params: {:f, 16})
+iex> params = %{"dense" => %{"kernel" => Nx.tensor([1.0, 2.0, 3.0])}}
+iex> params = Axon.MixedPrecision.cast(policy, params, :params)
+iex> Nx.type(params["dense"]["kernel"])
+{:f, 16}
+
+iex> policy = Axon.MixedPrecision.create_policy(compute: {:bf, 16})
+iex> value = Nx.tensor([1.0, 2.0, 3.0])
+iex> value = Axon.MixedPrecision.cast(policy, value, :compute)
+iex> Nx.type(value)
+{:bf, 16}
+
+iex> policy = Axon.MixedPrecision.create_policy(output: {:bf, 16})
+iex> value = Nx.tensor([1.0, 2.0, 3.0])
+iex> value = Axon.MixedPrecision.cast(policy, value, :output)
+iex> Nx.type(value)
+{:bf, 16}
+
+Note that integers are never promoted to floats:
+
+iex> policy = Axon.MixedPrecision.create_policy(output: {:f, 16})
+iex> value = Nx.tensor([1, 2, 3], type: :s64)
+iex> value = Axon.MixedPrecision.cast(policy, value, :params)
+iex> Nx.type(value)
+{:s, 64}
@@ -292,11 +292,11 @@ create_policy(opts \\ [])
Examples
-iex> Axon.MixedPrecision.create_policy(params: {:f, 16}, output: {:f, 16})
-#Axon.MixedPrecision.Policy<p=f16 c=f32 o=f16>
+iex> Axon.MixedPrecision.create_policy(params: {:f, 16}, output: {:f, 16})
+#Axon.MixedPrecision.Policy<p=f16 c=f32 o=f16>
-iex> Axon.MixedPrecision.create_policy(compute: {:bf, 16})
-#Axon.MixedPrecision.Policy<p=f32 c=bf16 o=f32>
+iex> Axon.MixedPrecision.create_policy(compute: {:bf, 16})
+#Axon.MixedPrecision.Policy<p=f32 c=bf16 o=f32>
diff --git a/Axon.ModelState.html b/Axon.ModelState.html
new file mode 100644
index 00000000..a42ce760
--- /dev/null
+++ b/Axon.ModelState.html
@@ -0,0 +1,556 @@
+Axon.ModelState — Axon v0.6.1
+
+Axon.ModelState (Axon v0.6.1)
+
+Model State Data Structure.
+
+This data structure represents all the state needed for
+a model to perform inference.
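+
+As a rough sketch, a model state can be built from a plain parameter map
+and inspected (the map below is illustrative, and the function names
+follow the summaries on this page rather than being quoted from it):
+
+    params = %{"dense_0" => %{"kernel" => Nx.tensor([[1.0], [2.0]])}}
+    model_state = Axon.ModelState.new(params)
+    # Everything starts out trainable until frozen with a mask.
+    Axon.ModelState.trainable_parameters(model_state)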
+
+Summary
+
+Functions
+
+Returns an empty model state.
+
+Freezes parameters and state in the given model state
+using the given mask.
+
+Returns the frozen parameters in the given model state.
+
+Returns the frozen state in the given model state.
+
+Returns a new model state struct from the given parameter
+map.
+
+Returns the trainable parameters in the given model state.
+
+Returns the trainable state in the given model state.
+
+Unfreezes parameters and state in the given model state
+using the given mask.
+
+Updates the given model state.
+
+Functions
+
+Returns an empty model state.
+
+Freezes parameters and state in the given model state
+using the given mask.
+
+The mask is an arity 1 function which takes the access path to the
+leaf parameter and returns true if the parameter should be frozen
+or false otherwise. With this, you can construct flexible masking
+policies:
+
+    fn
+      ["dense_" <> n, "kernel"] -> String.to_integer(n) < 3
+      _ -> false
+    end
+
+The default mask returns true for all paths, and is equivalent to
+freezing the entire model.
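+
+For instance, a minimal sketch of freezing everything except a single
+layer before fine-tuning (the model_state value and the "output" layer
+name are assumptions for illustration):
+
+    # Freeze all parameters whose access path does not start at the
+    # hypothetical "output" layer; only that layer stays trainable.
+    frozen_state =
+      Axon.ModelState.freeze(model_state, fn
+        ["output" | _] -> false
+        _ -> true
+      end)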
+
+Returns the frozen parameters in the given model state.
+
+Returns the frozen state in the given model state.
+
+Returns a new model state struct from the given parameter
+map.
+
+Returns the trainable parameters in the given model state.
+
+Returns the trainable state in the given model state.
+
+Unfreezes parameters and state in the given model state
+using the given mask.
+
+The mask is an arity 1 function which takes the access path to the
+leaf parameter and returns true if the parameter should be unfrozen
+or false otherwise. With this, you can construct flexible masking
+policies:
+
+    fn
+      ["dense_" <> n, "kernel"] -> String.to_integer(n) < 3
+      _ -> false
+    end
+
+The default mask returns true for all paths, and is equivalent to
+unfreezing the entire model.
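+
+For instance, a minimal sketch of unfreezing a previously frozen state
+for full fine-tuning (frozen_state is assumed to come from an earlier
+freeze call, as in the sketch above):
+
+    # An always-true mask unfreezes every path, making the whole
+    # model trainable again.
+    model_state = Axon.ModelState.unfreeze(frozen_state, fn _ -> true end)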
+
+Updates the given model state.
+
diff --git a/Axon.None.html b/Axon.None.html
index eef9a4a0..cba16db6 100644
--- a/Axon.None.html
+++ b/Axon.None.html
@@ -14,7 +14,7 @@
-
+
diff --git a/Axon.StatefulOutput.html b/Axon.StatefulOutput.html
index e88b4517..135d965d 100644
--- a/Axon.StatefulOutput.html
+++ b/Axon.StatefulOutput.html
@@ -14,7 +14,7 @@
-
+
diff --git a/Axon.epub b/Axon.epub
index f5860d72cf8faf1956a79c474934df92ce2bfc7d..9fe42e7ff64c235554aa5d16cc09bb6738d8e393 100644
GIT binary patch
delta 357599
[... base85-encoded binary delta data omitted ...]